author     David S. Miller <davem@davemloft.net>    2010-02-17 01:09:29 -0500
committer  David S. Miller <davem@davemloft.net>    2010-02-17 01:09:29 -0500
commit     2bb4646fce8d09916b351d1a62f98db7cec6fc41 (patch)
tree       c1f0d002e69868606eca9d1b919835f422892063 /arch/x86
parent     6836b9bdd98e3b500cd49512484df68f46e14659 (diff)
parent     b0483e78e5c4c9871fc5541875b3bc006846d46b (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'arch/x86')
43 files changed, 354 insertions, 1114 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cbcbfdee3ee0..eb4092568f9e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -989,12 +989,6 @@ config X86_CPUID | |||
989 | with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to | 989 | with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to |
990 | /dev/cpu/31/cpuid. | 990 | /dev/cpu/31/cpuid. |
991 | 991 | ||
992 | config X86_CPU_DEBUG | ||
993 | tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support" | ||
994 | ---help--- | ||
995 | If you select this option, this will provide various x86 CPUs | ||
996 | information through debugfs. | ||
997 | |||
998 | choice | 992 | choice |
999 | prompt "High Memory Support" | 993 | prompt "High Memory Support" |
1000 | default HIGHMEM4G if !X86_NUMAQ | 994 | default HIGHMEM4G if !X86_NUMAQ |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 2a4d073d2cf1..f9f472462753 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -308,14 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
308 | if (retval) | 308 | if (retval) |
309 | return retval; | 309 | return retval; |
310 | 310 | ||
311 | regs->cs = __USER32_CS; | ||
312 | regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = | ||
313 | regs->r13 = regs->r14 = regs->r15 = 0; | ||
314 | |||
315 | /* OK, This is the point of no return */ | 311 | /* OK, This is the point of no return */ |
316 | set_personality(PER_LINUX); | 312 | set_personality(PER_LINUX); |
317 | set_thread_flag(TIF_IA32); | 313 | set_thread_flag(TIF_IA32); |
318 | clear_thread_flag(TIF_ABI_PENDING); | 314 | |
315 | setup_new_exec(bprm); | ||
316 | |||
317 | regs->cs = __USER32_CS; | ||
318 | regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = | ||
319 | regs->r13 = regs->r14 = regs->r15 = 0; | ||
319 | 320 | ||
320 | current->mm->end_code = ex.a_text + | 321 | current->mm->end_code = ex.a_text + |
321 | (current->mm->start_code = N_TXTADDR(ex)); | 322 | (current->mm->start_code = N_TXTADDR(ex)); |
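The ia32_aout.c hunk is purely an ordering fix: the personality and thread-flag setup (now including setup_new_exec(), which replaces the open-coded TIF_ABI_PENDING handling) must run before the user register image is seeded. A condensed sketch of the sequence the loader follows after this change (illustrative; flush_old_exec() is assumed context from earlier in the function):

static int load_example_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int retval = flush_old_exec(bprm);	/* tear down the old image */

	if (retval)
		return retval;

	set_personality(PER_LINUX);		/* pick the new ABI first ... */
	set_thread_flag(TIF_IA32);
	setup_new_exec(bprm);			/* ... and let per-exec state settle */

	regs->cs = __USER32_CS;			/* only now seed the user registers */
	regs->r8 = regs->r9 = regs->r10 = 0;
	return 0;
}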
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 4d817f9e6e77..d2544f1d705d 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -31,6 +31,7 @@ extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); | |||
31 | extern int amd_iommu_init_devices(void); | 31 | extern int amd_iommu_init_devices(void); |
32 | extern void amd_iommu_uninit_devices(void); | 32 | extern void amd_iommu_uninit_devices(void); |
33 | extern void amd_iommu_init_notifier(void); | 33 | extern void amd_iommu_init_notifier(void); |
34 | extern void amd_iommu_init_api(void); | ||
34 | #ifndef CONFIG_AMD_IOMMU_STATS | 35 | #ifndef CONFIG_AMD_IOMMU_STATS |
35 | 36 | ||
36 | static inline void amd_iommu_stats_init(void) { } | 37 | static inline void amd_iommu_stats_init(void) { } |
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
deleted file mode 100644
index d96c1ee3a95c..000000000000
--- a/arch/x86/include/asm/cpu_debug.h
+++ /dev/null
@@ -1,127 +0,0 @@ | |||
1 | #ifndef _ASM_X86_CPU_DEBUG_H | ||
2 | #define _ASM_X86_CPU_DEBUG_H | ||
3 | |||
4 | /* | ||
5 | * CPU x86 architecture debug | ||
6 | * | ||
7 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
8 | */ | ||
9 | |||
10 | /* Register flags */ | ||
11 | enum cpu_debug_bit { | ||
12 | /* Model Specific Registers (MSRs) */ | ||
13 | CPU_MC_BIT, /* Machine Check */ | ||
14 | CPU_MONITOR_BIT, /* Monitor */ | ||
15 | CPU_TIME_BIT, /* Time */ | ||
16 | CPU_PMC_BIT, /* Performance Monitor */ | ||
17 | CPU_PLATFORM_BIT, /* Platform */ | ||
18 | CPU_APIC_BIT, /* APIC */ | ||
19 | CPU_POWERON_BIT, /* Power-on */ | ||
20 | CPU_CONTROL_BIT, /* Control */ | ||
21 | CPU_FEATURES_BIT, /* Features control */ | ||
22 | CPU_LBRANCH_BIT, /* Last Branch */ | ||
23 | CPU_BIOS_BIT, /* BIOS */ | ||
24 | CPU_FREQ_BIT, /* Frequency */ | ||
25 | CPU_MTTR_BIT, /* MTRR */ | ||
26 | CPU_PERF_BIT, /* Performance */ | ||
27 | CPU_CACHE_BIT, /* Cache */ | ||
28 | CPU_SYSENTER_BIT, /* Sysenter */ | ||
29 | CPU_THERM_BIT, /* Thermal */ | ||
30 | CPU_MISC_BIT, /* Miscellaneous */ | ||
31 | CPU_DEBUG_BIT, /* Debug */ | ||
32 | CPU_PAT_BIT, /* PAT */ | ||
33 | CPU_VMX_BIT, /* VMX */ | ||
34 | CPU_CALL_BIT, /* System Call */ | ||
35 | CPU_BASE_BIT, /* BASE Address */ | ||
36 | CPU_VER_BIT, /* Version ID */ | ||
37 | CPU_CONF_BIT, /* Configuration */ | ||
38 | CPU_SMM_BIT, /* System mgmt mode */ | ||
39 | CPU_SVM_BIT, /*Secure Virtual Machine*/ | ||
40 | CPU_OSVM_BIT, /* OS-Visible Workaround*/ | ||
41 | /* Standard Registers */ | ||
42 | CPU_TSS_BIT, /* Task Stack Segment */ | ||
43 | CPU_CR_BIT, /* Control Registers */ | ||
44 | CPU_DT_BIT, /* Descriptor Table */ | ||
45 | /* End of Registers flags */ | ||
46 | CPU_REG_ALL_BIT, /* Select all Registers */ | ||
47 | }; | ||
48 | |||
49 | #define CPU_REG_ALL (~0) /* Select all Registers */ | ||
50 | |||
51 | #define CPU_MC (1 << CPU_MC_BIT) | ||
52 | #define CPU_MONITOR (1 << CPU_MONITOR_BIT) | ||
53 | #define CPU_TIME (1 << CPU_TIME_BIT) | ||
54 | #define CPU_PMC (1 << CPU_PMC_BIT) | ||
55 | #define CPU_PLATFORM (1 << CPU_PLATFORM_BIT) | ||
56 | #define CPU_APIC (1 << CPU_APIC_BIT) | ||
57 | #define CPU_POWERON (1 << CPU_POWERON_BIT) | ||
58 | #define CPU_CONTROL (1 << CPU_CONTROL_BIT) | ||
59 | #define CPU_FEATURES (1 << CPU_FEATURES_BIT) | ||
60 | #define CPU_LBRANCH (1 << CPU_LBRANCH_BIT) | ||
61 | #define CPU_BIOS (1 << CPU_BIOS_BIT) | ||
62 | #define CPU_FREQ (1 << CPU_FREQ_BIT) | ||
63 | #define CPU_MTRR (1 << CPU_MTTR_BIT) | ||
64 | #define CPU_PERF (1 << CPU_PERF_BIT) | ||
65 | #define CPU_CACHE (1 << CPU_CACHE_BIT) | ||
66 | #define CPU_SYSENTER (1 << CPU_SYSENTER_BIT) | ||
67 | #define CPU_THERM (1 << CPU_THERM_BIT) | ||
68 | #define CPU_MISC (1 << CPU_MISC_BIT) | ||
69 | #define CPU_DEBUG (1 << CPU_DEBUG_BIT) | ||
70 | #define CPU_PAT (1 << CPU_PAT_BIT) | ||
71 | #define CPU_VMX (1 << CPU_VMX_BIT) | ||
72 | #define CPU_CALL (1 << CPU_CALL_BIT) | ||
73 | #define CPU_BASE (1 << CPU_BASE_BIT) | ||
74 | #define CPU_VER (1 << CPU_VER_BIT) | ||
75 | #define CPU_CONF (1 << CPU_CONF_BIT) | ||
76 | #define CPU_SMM (1 << CPU_SMM_BIT) | ||
77 | #define CPU_SVM (1 << CPU_SVM_BIT) | ||
78 | #define CPU_OSVM (1 << CPU_OSVM_BIT) | ||
79 | #define CPU_TSS (1 << CPU_TSS_BIT) | ||
80 | #define CPU_CR (1 << CPU_CR_BIT) | ||
81 | #define CPU_DT (1 << CPU_DT_BIT) | ||
82 | |||
83 | /* Register file flags */ | ||
84 | enum cpu_file_bit { | ||
85 | CPU_INDEX_BIT, /* index */ | ||
86 | CPU_VALUE_BIT, /* value */ | ||
87 | }; | ||
88 | |||
89 | #define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) | ||
90 | |||
91 | #define MAX_CPU_FILES 512 | ||
92 | |||
93 | struct cpu_private { | ||
94 | unsigned cpu; | ||
95 | unsigned type; | ||
96 | unsigned reg; | ||
97 | unsigned file; | ||
98 | }; | ||
99 | |||
100 | struct cpu_debug_base { | ||
101 | char *name; /* Register name */ | ||
102 | unsigned flag; /* Register flag */ | ||
103 | unsigned write; /* Register write flag */ | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * Currently it looks similar to cpu_debug_base but once we add more files | ||
108 | * cpu_file_base will go in different direction | ||
109 | */ | ||
110 | struct cpu_file_base { | ||
111 | char *name; /* Register file name */ | ||
112 | unsigned flag; /* Register file flag */ | ||
113 | unsigned write; /* Register write flag */ | ||
114 | }; | ||
115 | |||
116 | struct cpu_cpuX_base { | ||
117 | struct dentry *dentry; /* Register dentry */ | ||
118 | int init; /* Register index file */ | ||
119 | }; | ||
120 | |||
121 | struct cpu_debug_range { | ||
122 | unsigned min; /* Register range min */ | ||
123 | unsigned max; /* Register range max */ | ||
124 | unsigned flag; /* Supported flags */ | ||
125 | }; | ||
126 | |||
127 | #endif /* _ASM_X86_CPU_DEBUG_H */ | ||
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index b4501ee223ad..f2ad2163109d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t, | |||
170 | } | 170 | } |
171 | 171 | ||
172 | #define ELF_PLAT_INIT(_r, load_addr) \ | 172 | #define ELF_PLAT_INIT(_r, load_addr) \ |
173 | do { \ | 173 | elf_common_init(¤t->thread, _r, 0) |
174 | elf_common_init(¤t->thread, _r, 0); \ | ||
175 | clear_thread_flag(TIF_IA32); \ | ||
176 | } while (0) | ||
177 | 174 | ||
178 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ | 175 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ |
179 | elf_common_init(¤t->thread, regs, __USER_DS) | 176 | elf_common_init(¤t->thread, regs, __USER_DS) |
@@ -181,14 +178,8 @@ do { \ | |||
181 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); | 178 | void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); |
182 | #define compat_start_thread start_thread_ia32 | 179 | #define compat_start_thread start_thread_ia32 |
183 | 180 | ||
184 | #define COMPAT_SET_PERSONALITY(ex) \ | 181 | void set_personality_ia32(void); |
185 | do { \ | 182 | #define COMPAT_SET_PERSONALITY(ex) set_personality_ia32() |
186 | if (test_thread_flag(TIF_IA32)) \ | ||
187 | clear_thread_flag(TIF_ABI_PENDING); \ | ||
188 | else \ | ||
189 | set_thread_flag(TIF_ABI_PENDING); \ | ||
190 | current->personality |= force_personality32; \ | ||
191 | } while (0) | ||
192 | 183 | ||
193 | #define COMPAT_ELF_PLATFORM ("i686") | 184 | #define COMPAT_ELF_PLATFORM ("i686") |
194 | 185 | ||
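COMPAT_SET_PERSONALITY() no longer flips TIF_ABI_PENDING by hand; it calls the new set_personality_ia32() helper declared here and defined elsewhere in this series. A minimal sketch of what such a helper has to cover, inferred from the macro it replaces (not necessarily the exact kernel implementation):

void set_personality_ia32(void)
{
	set_thread_flag(TIF_IA32);			/* mark the task as 32-bit */
	current->personality |= force_personality32;	/* as the old macro did    */
	/* any further compat setup (syscall entry state etc.) goes here */
}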
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 5d89fd2a3690..1d5c08a1bdfd 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -67,6 +67,7 @@ extern unsigned long hpet_address; | |||
67 | extern unsigned long force_hpet_address; | 67 | extern unsigned long force_hpet_address; |
68 | extern u8 hpet_blockid; | 68 | extern u8 hpet_blockid; |
69 | extern int hpet_force_user; | 69 | extern int hpet_force_user; |
70 | extern u8 hpet_msi_disable; | ||
70 | extern int is_hpet_enabled(void); | 71 | extern int is_hpet_enabled(void); |
71 | extern int hpet_enable(void); | 72 | extern int hpet_enable(void); |
72 | extern void hpet_disable(void); | 73 | extern void hpet_disable(void); |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index c24ca9a56458..ef51b501e22a 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -12,8 +12,6 @@ struct device; | |||
12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; | 12 | enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; |
13 | 13 | ||
14 | struct microcode_ops { | 14 | struct microcode_ops { |
15 | void (*init)(struct device *device); | ||
16 | void (*fini)(void); | ||
17 | enum ucode_state (*request_microcode_user) (int cpu, | 15 | enum ucode_state (*request_microcode_user) (int cpu, |
18 | const void __user *buf, size_t size); | 16 | const void __user *buf, size_t size); |
19 | 17 | ||
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index ecb544e65382..e04740f7a0bb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -11,9 +11,9 @@ | |||
11 | #include <linux/irqflags.h> | 11 | #include <linux/irqflags.h> |
12 | 12 | ||
13 | /* entries in ARCH_DLINFO: */ | 13 | /* entries in ARCH_DLINFO: */ |
14 | #ifdef CONFIG_IA32_EMULATION | 14 | #if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) |
15 | # define AT_VECTOR_SIZE_ARCH 2 | 15 | # define AT_VECTOR_SIZE_ARCH 2 |
16 | #else | 16 | #else /* else it's non-compat x86-64 */ |
17 | # define AT_VECTOR_SIZE_ARCH 1 | 17 | # define AT_VECTOR_SIZE_ARCH 1 |
18 | #endif | 18 | #endif |
19 | 19 | ||
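AT_VECTOR_SIZE_ARCH only sizes the saved ELF auxiliary vector; native 32-bit x86 also emits two arch entries (AT_SYSINFO and AT_SYSINFO_EHDR) from its ARCH_DLINFO, so counting only one there risked overflowing the vector. The relationship it feeds, roughly as in include/linux/auxvec.h:

/* each auxv entry is an (id, value) pair, hence the factor of two */
#define AT_VECTOR_SIZE	(2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))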
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 375c917c37d2..e0d28901e969 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -87,7 +87,6 @@ struct thread_info { | |||
87 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | 87 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ |
88 | #define TIF_IA32 17 /* 32bit process */ | 88 | #define TIF_IA32 17 /* 32bit process */ |
89 | #define TIF_FORK 18 /* ret_from_fork */ | 89 | #define TIF_FORK 18 /* ret_from_fork */ |
90 | #define TIF_ABI_PENDING 19 | ||
91 | #define TIF_MEMDIE 20 | 90 | #define TIF_MEMDIE 20 |
92 | #define TIF_DEBUG 21 /* uses debug registers */ | 91 | #define TIF_DEBUG 21 /* uses debug registers */ |
93 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | 92 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ |
@@ -112,7 +111,6 @@ struct thread_info { | |||
112 | #define _TIF_NOTSC (1 << TIF_NOTSC) | 111 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
113 | #define _TIF_IA32 (1 << TIF_IA32) | 112 | #define _TIF_IA32 (1 << TIF_IA32) |
114 | #define _TIF_FORK (1 << TIF_FORK) | 113 | #define _TIF_FORK (1 << TIF_FORK) |
115 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) | ||
116 | #define _TIF_DEBUG (1 << TIF_DEBUG) | 114 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
117 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | 115 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
118 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 116 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 036d28adf59d..0acbcdfa5ca4 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void) | |||
1185 | if (!error) { | 1185 | if (!error) { |
1186 | acpi_lapic = 1; | 1186 | acpi_lapic = 1; |
1187 | 1187 | ||
1188 | #ifdef CONFIG_X86_BIGSMP | ||
1189 | generic_bigsmp_probe(); | ||
1190 | #endif | ||
1191 | /* | 1188 | /* |
1192 | * Parse MADT IO-APIC entries | 1189 | * Parse MADT IO-APIC entries |
1193 | */ | 1190 | */ |
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void) | |||
1197 | acpi_ioapic = 1; | 1194 | acpi_ioapic = 1; |
1198 | 1195 | ||
1199 | smp_found_config = 1; | 1196 | smp_found_config = 1; |
1200 | if (apic->setup_apic_routing) | ||
1201 | apic->setup_apic_routing(); | ||
1202 | } | 1197 | } |
1203 | } | 1198 | } |
1204 | if (error == -EINVAL) { | 1199 | if (error == -EINVAL) { |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 23824fef789c..adb0ba025702 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -980,7 +980,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom, | |||
980 | { | 980 | { |
981 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; | 981 | int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; |
982 | struct amd_iommu *iommu; | 982 | struct amd_iommu *iommu; |
983 | int i; | 983 | unsigned long i; |
984 | 984 | ||
985 | #ifdef CONFIG_IOMMU_STRESS | 985 | #ifdef CONFIG_IOMMU_STRESS |
986 | populate = false; | 986 | populate = false; |
@@ -1489,11 +1489,14 @@ static void __detach_device(struct device *dev) | |||
1489 | { | 1489 | { |
1490 | struct iommu_dev_data *dev_data = get_dev_data(dev); | 1490 | struct iommu_dev_data *dev_data = get_dev_data(dev); |
1491 | struct iommu_dev_data *alias_data; | 1491 | struct iommu_dev_data *alias_data; |
1492 | struct protection_domain *domain; | ||
1492 | unsigned long flags; | 1493 | unsigned long flags; |
1493 | 1494 | ||
1494 | BUG_ON(!dev_data->domain); | 1495 | BUG_ON(!dev_data->domain); |
1495 | 1496 | ||
1496 | spin_lock_irqsave(&dev_data->domain->lock, flags); | 1497 | domain = dev_data->domain; |
1498 | |||
1499 | spin_lock_irqsave(&domain->lock, flags); | ||
1497 | 1500 | ||
1498 | if (dev_data->alias != dev) { | 1501 | if (dev_data->alias != dev) { |
1499 | alias_data = get_dev_data(dev_data->alias); | 1502 | alias_data = get_dev_data(dev_data->alias); |
@@ -1504,13 +1507,15 @@ static void __detach_device(struct device *dev) | |||
1504 | if (atomic_dec_and_test(&dev_data->bind)) | 1507 | if (atomic_dec_and_test(&dev_data->bind)) |
1505 | do_detach(dev); | 1508 | do_detach(dev); |
1506 | 1509 | ||
1507 | spin_unlock_irqrestore(&dev_data->domain->lock, flags); | 1510 | spin_unlock_irqrestore(&domain->lock, flags); |
1508 | 1511 | ||
1509 | /* | 1512 | /* |
1510 | * If we run in passthrough mode the device must be assigned to the | 1513 | * If we run in passthrough mode the device must be assigned to the |
1511 | * passthrough domain if it is detached from any other domain | 1514 | * passthrough domain if it is detached from any other domain. |
1515 | * Make sure we can deassign from the pt_domain itself. | ||
1512 | */ | 1516 | */ |
1513 | if (iommu_pass_through && dev_data->domain == NULL) | 1517 | if (iommu_pass_through && |
1518 | (dev_data->domain == NULL && domain != pt_domain)) | ||
1514 | __attach_device(dev, pt_domain); | 1519 | __attach_device(dev, pt_domain); |
1515 | } | 1520 | } |
1516 | 1521 | ||
@@ -2218,6 +2223,12 @@ static struct dma_map_ops amd_iommu_dma_ops = { | |||
2218 | /* | 2223 | /* |
2219 | * The function which clues the AMD IOMMU driver into dma_ops. | 2224 | * The function which clues the AMD IOMMU driver into dma_ops. |
2220 | */ | 2225 | */ |
2226 | |||
2227 | void __init amd_iommu_init_api(void) | ||
2228 | { | ||
2229 | register_iommu(&amd_iommu_ops); | ||
2230 | } | ||
2231 | |||
2221 | int __init amd_iommu_init_dma_ops(void) | 2232 | int __init amd_iommu_init_dma_ops(void) |
2222 | { | 2233 | { |
2223 | struct amd_iommu *iommu; | 2234 | struct amd_iommu *iommu; |
@@ -2253,8 +2264,6 @@ int __init amd_iommu_init_dma_ops(void) | |||
2253 | /* Make the driver finally visible to the drivers */ | 2264 | /* Make the driver finally visible to the drivers */ |
2254 | dma_ops = &amd_iommu_dma_ops; | 2265 | dma_ops = &amd_iommu_dma_ops; |
2255 | 2266 | ||
2256 | register_iommu(&amd_iommu_ops); | ||
2257 | |||
2258 | amd_iommu_stats_init(); | 2267 | amd_iommu_stats_init(); |
2259 | 2268 | ||
2260 | return 0; | 2269 | return 0; |
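Two separate fixes are visible in the amd_iommu.c hunks. The __detach_device() change caches dev_data->domain in a local: do_detach() can clear dev_data->domain, so unlocking through that pointer afterwards could dereference NULL, and the pass-through check now also refuses to re-attach pt_domain to itself. A minimal sketch of the locking hazard being removed (illustrative):

/* old pattern: the unlock goes through a pointer do_detach() may clear */
spin_lock_irqsave(&dev_data->domain->lock, flags);
do_detach(dev);					/* may set dev_data->domain = NULL */
spin_unlock_irqrestore(&dev_data->domain->lock, flags);	/* NULL deref */

/* fixed pattern: keep a stable reference for the unlock */
domain = dev_data->domain;
spin_lock_irqsave(&domain->lock, flags);
do_detach(dev);
spin_unlock_irqrestore(&domain->lock, flags);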
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index fb490ce7dd55..9dc91b431470 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1292,9 +1292,12 @@ static int __init amd_iommu_init(void) | |||
1292 | ret = amd_iommu_init_passthrough(); | 1292 | ret = amd_iommu_init_passthrough(); |
1293 | else | 1293 | else |
1294 | ret = amd_iommu_init_dma_ops(); | 1294 | ret = amd_iommu_init_dma_ops(); |
1295 | |||
1295 | if (ret) | 1296 | if (ret) |
1296 | goto free; | 1297 | goto free; |
1297 | 1298 | ||
1299 | amd_iommu_init_api(); | ||
1300 | |||
1298 | amd_iommu_init_notifier(); | 1301 | amd_iommu_init_notifier(); |
1299 | 1302 | ||
1300 | enable_iommus(); | 1303 | enable_iommus(); |
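amd_iommu_init_api() is called unconditionally after whichever init path ran, so register_iommu() now happens in pass-through mode too; previously it lived only in amd_iommu_init_dma_ops(), leaving the IOMMU API unregistered under iommu=pt. Condensed control flow after the change (sketch):

if (iommu_pass_through)
	ret = amd_iommu_init_passthrough();
else
	ret = amd_iommu_init_dma_ops();
if (ret)
	goto free;
amd_iommu_init_api();		/* register_iommu() runs on both paths now */
amd_iommu_init_notifier();
enable_iommus();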
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3987e4408f75..dfca210f6a10 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void) | |||
1641 | #endif | 1641 | #endif |
1642 | 1642 | ||
1643 | enable_IR_x2apic(); | 1643 | enable_IR_x2apic(); |
1644 | #ifdef CONFIG_X86_64 | ||
1645 | default_setup_apic_routing(); | 1644 | default_setup_apic_routing(); |
1646 | #endif | ||
1647 | 1645 | ||
1648 | verify_local_APIC(); | 1646 | verify_local_APIC(); |
1649 | connect_bsp_APIC(); | 1647 | connect_bsp_APIC(); |
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1891 | if (apicid > max_physical_apicid) | 1889 | if (apicid > max_physical_apicid) |
1892 | max_physical_apicid = apicid; | 1890 | max_physical_apicid = apicid; |
1893 | 1891 | ||
1894 | #ifdef CONFIG_X86_32 | ||
1895 | if (num_processors > 8) { | ||
1896 | switch (boot_cpu_data.x86_vendor) { | ||
1897 | case X86_VENDOR_INTEL: | ||
1898 | if (!APIC_XAPIC(version)) { | ||
1899 | def_to_bigsmp = 0; | ||
1900 | break; | ||
1901 | } | ||
1902 | /* If P4 and above fall through */ | ||
1903 | case X86_VENDOR_AMD: | ||
1904 | def_to_bigsmp = 1; | ||
1905 | } | ||
1906 | } | ||
1907 | #endif | ||
1908 | |||
1909 | #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) | 1892 | #if defined(CONFIG_SMP) || defined(CONFIG_X86_64) |
1910 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 1893 | early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; |
1911 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; | 1894 | early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1a6559f6768c..99d2fe016084 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void) | |||
52 | } | 52 | } |
53 | late_initcall(print_ipi_mode); | 53 | late_initcall(print_ipi_mode); |
54 | 54 | ||
55 | void default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
56 | { | ||
57 | int version = apic_version[boot_cpu_physical_apicid]; | ||
58 | |||
59 | if (num_possible_cpus() > 8) { | ||
60 | switch (boot_cpu_data.x86_vendor) { | ||
61 | case X86_VENDOR_INTEL: | ||
62 | if (!APIC_XAPIC(version)) { | ||
63 | def_to_bigsmp = 0; | ||
64 | break; | ||
65 | } | ||
66 | /* If P4 and above fall through */ | ||
67 | case X86_VENDOR_AMD: | ||
68 | def_to_bigsmp = 1; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | #ifdef CONFIG_X86_BIGSMP | ||
73 | generic_bigsmp_probe(); | ||
74 | #endif | ||
75 | |||
76 | if (apic->setup_apic_routing) | ||
77 | apic->setup_apic_routing(); | ||
78 | } | ||
79 | |||
80 | static void setup_apic_flat_routing(void) | ||
56 | { | 81 | { |
57 | #ifdef CONFIG_X86_IO_APIC | 82 | #ifdef CONFIG_X86_IO_APIC |
58 | printk(KERN_INFO | 83 | printk(KERN_INFO |
@@ -103,7 +128,7 @@ struct apic apic_default = { | |||
103 | .init_apic_ldr = default_init_apic_ldr, | 128 | .init_apic_ldr = default_init_apic_ldr, |
104 | 129 | ||
105 | .ioapic_phys_id_map = default_ioapic_phys_id_map, | 130 | .ioapic_phys_id_map = default_ioapic_phys_id_map, |
106 | .setup_apic_routing = default_setup_apic_routing, | 131 | .setup_apic_routing = setup_apic_flat_routing, |
107 | .multi_timer_check = NULL, | 132 | .multi_timer_check = NULL, |
108 | .apicid_to_node = default_apicid_to_node, | 133 | .apicid_to_node = default_apicid_to_node, |
109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | 134 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, |
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 450fe2064a14..83e9be4778e2 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void) | |||
67 | } | 67 | } |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | if (apic == &apic_flat && num_processors > 8) | 70 | if (apic == &apic_flat && num_possible_cpus() > 8) |
71 | apic = &apic_physflat; | 71 | apic = &apic_physflat; |
72 | 72 | ||
73 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); | 73 | printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); |
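Both probe files now key off num_possible_cpus() instead of num_processors, so the routing decision no longer depends on how many CPUs happen to have been enumerated at call time. The threshold of eight comes from flat logical APIC addressing, where each CPU owns one bit of an 8-bit logical destination; a small illustrative helper (not kernel code):

static inline unsigned int flat_logical_dest(unsigned int cpu)
{
	/* one bit per CPU; CPUs 8 and up simply cannot be represented,
	 * which is why more than eight processors forces physflat/bigsmp */
	return cpu < 8 ? (1U << cpu) : 0;
}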
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1d2cb383410e..c202b62f3671 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -19,8 +19,6 @@ obj-y += vmware.o hypervisor.o sched.o | |||
19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o | 19 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o |
20 | obj-$(CONFIG_X86_64) += bugs_64.o | 20 | obj-$(CONFIG_X86_64) += bugs_64.o |
21 | 21 | ||
22 | obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o | ||
23 | |||
24 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o | 22 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o |
25 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o | 23 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o |
26 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o | 24 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o |
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
deleted file mode 100644
index b368cd862997..000000000000
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ /dev/null
@@ -1,688 +0,0 @@ | |||
1 | /* | ||
2 | * CPU x86 architecture debug code | ||
3 | * | ||
4 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
5 | * | ||
6 | * For licencing details see kernel-base/COPYING | ||
7 | */ | ||
8 | |||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/compiler.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <linux/debugfs.h> | ||
13 | #include <linux/kprobes.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/percpu.h> | ||
18 | #include <linux/signal.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/smp.h> | ||
25 | |||
26 | #include <asm/cpu_debug.h> | ||
27 | #include <asm/paravirt.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/traps.h> | ||
30 | #include <asm/apic.h> | ||
31 | #include <asm/desc.h> | ||
32 | |||
33 | static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr); | ||
34 | static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr); | ||
35 | static DEFINE_PER_CPU(int, cpud_priv_count); | ||
36 | |||
37 | static DEFINE_MUTEX(cpu_debug_lock); | ||
38 | |||
39 | static struct dentry *cpu_debugfs_dir; | ||
40 | |||
41 | static struct cpu_debug_base cpu_base[] = { | ||
42 | { "mc", CPU_MC, 0 }, | ||
43 | { "monitor", CPU_MONITOR, 0 }, | ||
44 | { "time", CPU_TIME, 0 }, | ||
45 | { "pmc", CPU_PMC, 1 }, | ||
46 | { "platform", CPU_PLATFORM, 0 }, | ||
47 | { "apic", CPU_APIC, 0 }, | ||
48 | { "poweron", CPU_POWERON, 0 }, | ||
49 | { "control", CPU_CONTROL, 0 }, | ||
50 | { "features", CPU_FEATURES, 0 }, | ||
51 | { "lastbranch", CPU_LBRANCH, 0 }, | ||
52 | { "bios", CPU_BIOS, 0 }, | ||
53 | { "freq", CPU_FREQ, 0 }, | ||
54 | { "mtrr", CPU_MTRR, 0 }, | ||
55 | { "perf", CPU_PERF, 0 }, | ||
56 | { "cache", CPU_CACHE, 0 }, | ||
57 | { "sysenter", CPU_SYSENTER, 0 }, | ||
58 | { "therm", CPU_THERM, 0 }, | ||
59 | { "misc", CPU_MISC, 0 }, | ||
60 | { "debug", CPU_DEBUG, 0 }, | ||
61 | { "pat", CPU_PAT, 0 }, | ||
62 | { "vmx", CPU_VMX, 0 }, | ||
63 | { "call", CPU_CALL, 0 }, | ||
64 | { "base", CPU_BASE, 0 }, | ||
65 | { "ver", CPU_VER, 0 }, | ||
66 | { "conf", CPU_CONF, 0 }, | ||
67 | { "smm", CPU_SMM, 0 }, | ||
68 | { "svm", CPU_SVM, 0 }, | ||
69 | { "osvm", CPU_OSVM, 0 }, | ||
70 | { "tss", CPU_TSS, 0 }, | ||
71 | { "cr", CPU_CR, 0 }, | ||
72 | { "dt", CPU_DT, 0 }, | ||
73 | { "registers", CPU_REG_ALL, 0 }, | ||
74 | }; | ||
75 | |||
76 | static struct cpu_file_base cpu_file[] = { | ||
77 | { "index", CPU_REG_ALL, 0 }, | ||
78 | { "value", CPU_REG_ALL, 1 }, | ||
79 | }; | ||
80 | |||
81 | /* CPU Registers Range */ | ||
82 | static struct cpu_debug_range cpu_reg_range[] = { | ||
83 | { 0x00000000, 0x00000001, CPU_MC, }, | ||
84 | { 0x00000006, 0x00000007, CPU_MONITOR, }, | ||
85 | { 0x00000010, 0x00000010, CPU_TIME, }, | ||
86 | { 0x00000011, 0x00000013, CPU_PMC, }, | ||
87 | { 0x00000017, 0x00000017, CPU_PLATFORM, }, | ||
88 | { 0x0000001B, 0x0000001B, CPU_APIC, }, | ||
89 | { 0x0000002A, 0x0000002B, CPU_POWERON, }, | ||
90 | { 0x0000002C, 0x0000002C, CPU_FREQ, }, | ||
91 | { 0x0000003A, 0x0000003A, CPU_CONTROL, }, | ||
92 | { 0x00000040, 0x00000047, CPU_LBRANCH, }, | ||
93 | { 0x00000060, 0x00000067, CPU_LBRANCH, }, | ||
94 | { 0x00000079, 0x00000079, CPU_BIOS, }, | ||
95 | { 0x00000088, 0x0000008A, CPU_CACHE, }, | ||
96 | { 0x0000008B, 0x0000008B, CPU_BIOS, }, | ||
97 | { 0x0000009B, 0x0000009B, CPU_MONITOR, }, | ||
98 | { 0x000000C1, 0x000000C4, CPU_PMC, }, | ||
99 | { 0x000000CD, 0x000000CD, CPU_FREQ, }, | ||
100 | { 0x000000E7, 0x000000E8, CPU_PERF, }, | ||
101 | { 0x000000FE, 0x000000FE, CPU_MTRR, }, | ||
102 | |||
103 | { 0x00000116, 0x0000011E, CPU_CACHE, }, | ||
104 | { 0x00000174, 0x00000176, CPU_SYSENTER, }, | ||
105 | { 0x00000179, 0x0000017B, CPU_MC, }, | ||
106 | { 0x00000186, 0x00000189, CPU_PMC, }, | ||
107 | { 0x00000198, 0x00000199, CPU_PERF, }, | ||
108 | { 0x0000019A, 0x0000019A, CPU_TIME, }, | ||
109 | { 0x0000019B, 0x0000019D, CPU_THERM, }, | ||
110 | { 0x000001A0, 0x000001A0, CPU_MISC, }, | ||
111 | { 0x000001C9, 0x000001C9, CPU_LBRANCH, }, | ||
112 | { 0x000001D7, 0x000001D8, CPU_LBRANCH, }, | ||
113 | { 0x000001D9, 0x000001D9, CPU_DEBUG, }, | ||
114 | { 0x000001DA, 0x000001E0, CPU_LBRANCH, }, | ||
115 | |||
116 | { 0x00000200, 0x0000020F, CPU_MTRR, }, | ||
117 | { 0x00000250, 0x00000250, CPU_MTRR, }, | ||
118 | { 0x00000258, 0x00000259, CPU_MTRR, }, | ||
119 | { 0x00000268, 0x0000026F, CPU_MTRR, }, | ||
120 | { 0x00000277, 0x00000277, CPU_PAT, }, | ||
121 | { 0x000002FF, 0x000002FF, CPU_MTRR, }, | ||
122 | |||
123 | { 0x00000300, 0x00000311, CPU_PMC, }, | ||
124 | { 0x00000345, 0x00000345, CPU_PMC, }, | ||
125 | { 0x00000360, 0x00000371, CPU_PMC, }, | ||
126 | { 0x0000038D, 0x00000390, CPU_PMC, }, | ||
127 | { 0x000003A0, 0x000003BE, CPU_PMC, }, | ||
128 | { 0x000003C0, 0x000003CD, CPU_PMC, }, | ||
129 | { 0x000003E0, 0x000003E1, CPU_PMC, }, | ||
130 | { 0x000003F0, 0x000003F2, CPU_PMC, }, | ||
131 | |||
132 | { 0x00000400, 0x00000417, CPU_MC, }, | ||
133 | { 0x00000480, 0x0000048B, CPU_VMX, }, | ||
134 | |||
135 | { 0x00000600, 0x00000600, CPU_DEBUG, }, | ||
136 | { 0x00000680, 0x0000068F, CPU_LBRANCH, }, | ||
137 | { 0x000006C0, 0x000006CF, CPU_LBRANCH, }, | ||
138 | |||
139 | { 0x000107CC, 0x000107D3, CPU_PMC, }, | ||
140 | |||
141 | { 0xC0000080, 0xC0000080, CPU_FEATURES, }, | ||
142 | { 0xC0000081, 0xC0000084, CPU_CALL, }, | ||
143 | { 0xC0000100, 0xC0000102, CPU_BASE, }, | ||
144 | { 0xC0000103, 0xC0000103, CPU_TIME, }, | ||
145 | |||
146 | { 0xC0010000, 0xC0010007, CPU_PMC, }, | ||
147 | { 0xC0010010, 0xC0010010, CPU_CONF, }, | ||
148 | { 0xC0010015, 0xC0010015, CPU_CONF, }, | ||
149 | { 0xC0010016, 0xC001001A, CPU_MTRR, }, | ||
150 | { 0xC001001D, 0xC001001D, CPU_MTRR, }, | ||
151 | { 0xC001001F, 0xC001001F, CPU_CONF, }, | ||
152 | { 0xC0010030, 0xC0010035, CPU_BIOS, }, | ||
153 | { 0xC0010044, 0xC0010048, CPU_MC, }, | ||
154 | { 0xC0010050, 0xC0010056, CPU_SMM, }, | ||
155 | { 0xC0010058, 0xC0010058, CPU_CONF, }, | ||
156 | { 0xC0010060, 0xC0010060, CPU_CACHE, }, | ||
157 | { 0xC0010061, 0xC0010068, CPU_SMM, }, | ||
158 | { 0xC0010069, 0xC001006B, CPU_SMM, }, | ||
159 | { 0xC0010070, 0xC0010071, CPU_SMM, }, | ||
160 | { 0xC0010111, 0xC0010113, CPU_SMM, }, | ||
161 | { 0xC0010114, 0xC0010118, CPU_SVM, }, | ||
162 | { 0xC0010140, 0xC0010141, CPU_OSVM, }, | ||
163 | { 0xC0011022, 0xC0011023, CPU_CONF, }, | ||
164 | }; | ||
165 | |||
166 | static int is_typeflag_valid(unsigned cpu, unsigned flag) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | /* Standard Registers should be always valid */ | ||
171 | if (flag >= CPU_TSS) | ||
172 | return 1; | ||
173 | |||
174 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
175 | if (cpu_reg_range[i].flag == flag) | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | /* Invalid */ | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, | ||
184 | int index, unsigned flag) | ||
185 | { | ||
186 | if (cpu_reg_range[index].flag == flag) { | ||
187 | *min = cpu_reg_range[index].min; | ||
188 | *max = cpu_reg_range[index].max; | ||
189 | } else | ||
190 | *max = 0; | ||
191 | |||
192 | return *max; | ||
193 | } | ||
194 | |||
195 | /* This function can also be called with seq = NULL for printk */ | ||
196 | static void print_cpu_data(struct seq_file *seq, unsigned type, | ||
197 | u32 low, u32 high) | ||
198 | { | ||
199 | struct cpu_private *priv; | ||
200 | u64 val = high; | ||
201 | |||
202 | if (seq) { | ||
203 | priv = seq->private; | ||
204 | if (priv->file) { | ||
205 | val = (val << 32) | low; | ||
206 | seq_printf(seq, "0x%llx\n", val); | ||
207 | } else | ||
208 | seq_printf(seq, " %08x: %08x_%08x\n", | ||
209 | type, high, low); | ||
210 | } else | ||
211 | printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low); | ||
212 | } | ||
213 | |||
214 | /* This function can also be called with seq = NULL for printk */ | ||
215 | static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) | ||
216 | { | ||
217 | unsigned msr, msr_min, msr_max; | ||
218 | struct cpu_private *priv; | ||
219 | u32 low, high; | ||
220 | int i; | ||
221 | |||
222 | if (seq) { | ||
223 | priv = seq->private; | ||
224 | if (priv->file) { | ||
225 | if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg, | ||
226 | &low, &high)) | ||
227 | print_cpu_data(seq, priv->reg, low, high); | ||
228 | return; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
233 | if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) | ||
234 | continue; | ||
235 | |||
236 | for (msr = msr_min; msr <= msr_max; msr++) { | ||
237 | if (rdmsr_safe_on_cpu(cpu, msr, &low, &high)) | ||
238 | continue; | ||
239 | print_cpu_data(seq, msr, low, high); | ||
240 | } | ||
241 | } | ||
242 | } | ||
243 | |||
244 | static void print_tss(void *arg) | ||
245 | { | ||
246 | struct pt_regs *regs = task_pt_regs(current); | ||
247 | struct seq_file *seq = arg; | ||
248 | unsigned int seg; | ||
249 | |||
250 | seq_printf(seq, " RAX\t: %016lx\n", regs->ax); | ||
251 | seq_printf(seq, " RBX\t: %016lx\n", regs->bx); | ||
252 | seq_printf(seq, " RCX\t: %016lx\n", regs->cx); | ||
253 | seq_printf(seq, " RDX\t: %016lx\n", regs->dx); | ||
254 | |||
255 | seq_printf(seq, " RSI\t: %016lx\n", regs->si); | ||
256 | seq_printf(seq, " RDI\t: %016lx\n", regs->di); | ||
257 | seq_printf(seq, " RBP\t: %016lx\n", regs->bp); | ||
258 | seq_printf(seq, " ESP\t: %016lx\n", regs->sp); | ||
259 | |||
260 | #ifdef CONFIG_X86_64 | ||
261 | seq_printf(seq, " R08\t: %016lx\n", regs->r8); | ||
262 | seq_printf(seq, " R09\t: %016lx\n", regs->r9); | ||
263 | seq_printf(seq, " R10\t: %016lx\n", regs->r10); | ||
264 | seq_printf(seq, " R11\t: %016lx\n", regs->r11); | ||
265 | seq_printf(seq, " R12\t: %016lx\n", regs->r12); | ||
266 | seq_printf(seq, " R13\t: %016lx\n", regs->r13); | ||
267 | seq_printf(seq, " R14\t: %016lx\n", regs->r14); | ||
268 | seq_printf(seq, " R15\t: %016lx\n", regs->r15); | ||
269 | #endif | ||
270 | |||
271 | asm("movl %%cs,%0" : "=r" (seg)); | ||
272 | seq_printf(seq, " CS\t: %04x\n", seg); | ||
273 | asm("movl %%ds,%0" : "=r" (seg)); | ||
274 | seq_printf(seq, " DS\t: %04x\n", seg); | ||
275 | seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff); | ||
276 | asm("movl %%es,%0" : "=r" (seg)); | ||
277 | seq_printf(seq, " ES\t: %04x\n", seg); | ||
278 | asm("movl %%fs,%0" : "=r" (seg)); | ||
279 | seq_printf(seq, " FS\t: %04x\n", seg); | ||
280 | asm("movl %%gs,%0" : "=r" (seg)); | ||
281 | seq_printf(seq, " GS\t: %04x\n", seg); | ||
282 | |||
283 | seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags); | ||
284 | |||
285 | seq_printf(seq, " EIP\t: %016lx\n", regs->ip); | ||
286 | } | ||
287 | |||
288 | static void print_cr(void *arg) | ||
289 | { | ||
290 | struct seq_file *seq = arg; | ||
291 | |||
292 | seq_printf(seq, " cr0\t: %016lx\n", read_cr0()); | ||
293 | seq_printf(seq, " cr2\t: %016lx\n", read_cr2()); | ||
294 | seq_printf(seq, " cr3\t: %016lx\n", read_cr3()); | ||
295 | seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe()); | ||
296 | #ifdef CONFIG_X86_64 | ||
297 | seq_printf(seq, " cr8\t: %016lx\n", read_cr8()); | ||
298 | #endif | ||
299 | } | ||
300 | |||
301 | static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt) | ||
302 | { | ||
303 | seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size)); | ||
304 | } | ||
305 | |||
306 | static void print_dt(void *seq) | ||
307 | { | ||
308 | struct desc_ptr dt; | ||
309 | unsigned long ldt; | ||
310 | |||
311 | /* IDT */ | ||
312 | store_idt((struct desc_ptr *)&dt); | ||
313 | print_desc_ptr("IDT", seq, dt); | ||
314 | |||
315 | /* GDT */ | ||
316 | store_gdt((struct desc_ptr *)&dt); | ||
317 | print_desc_ptr("GDT", seq, dt); | ||
318 | |||
319 | /* LDT */ | ||
320 | store_ldt(ldt); | ||
321 | seq_printf(seq, " LDT\t: %016lx\n", ldt); | ||
322 | |||
323 | /* TR */ | ||
324 | store_tr(ldt); | ||
325 | seq_printf(seq, " TR\t: %016lx\n", ldt); | ||
326 | } | ||
327 | |||
328 | static void print_dr(void *arg) | ||
329 | { | ||
330 | struct seq_file *seq = arg; | ||
331 | unsigned long dr; | ||
332 | int i; | ||
333 | |||
334 | for (i = 0; i < 8; i++) { | ||
335 | /* Ignore db4, db5 */ | ||
336 | if ((i == 4) || (i == 5)) | ||
337 | continue; | ||
338 | get_debugreg(dr, i); | ||
339 | seq_printf(seq, " dr%d\t: %016lx\n", i, dr); | ||
340 | } | ||
341 | |||
342 | seq_printf(seq, "\n MSR\t:\n"); | ||
343 | } | ||
344 | |||
345 | static void print_apic(void *arg) | ||
346 | { | ||
347 | struct seq_file *seq = arg; | ||
348 | |||
349 | #ifdef CONFIG_X86_LOCAL_APIC | ||
350 | seq_printf(seq, " LAPIC\t:\n"); | ||
351 | seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24); | ||
352 | seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR)); | ||
353 | seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI)); | ||
354 | seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI)); | ||
355 | seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI)); | ||
356 | seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR)); | ||
357 | seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR)); | ||
358 | seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV)); | ||
359 | seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR)); | ||
360 | seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR)); | ||
361 | seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR)); | ||
362 | seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2)); | ||
363 | seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT)); | ||
364 | seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR)); | ||
365 | seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC)); | ||
366 | seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0)); | ||
367 | seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1)); | ||
368 | seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR)); | ||
369 | seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); | ||
370 | seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); | ||
371 | seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); | ||
372 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | ||
373 | unsigned int i, v, maxeilvt; | ||
374 | |||
375 | v = apic_read(APIC_EFEAT); | ||
376 | maxeilvt = (v >> 16) & 0xff; | ||
377 | seq_printf(seq, " EFEAT\t\t: %08x\n", v); | ||
378 | seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); | ||
379 | |||
380 | for (i = 0; i < maxeilvt; i++) { | ||
381 | v = apic_read(APIC_EILVTn(i)); | ||
382 | seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); | ||
383 | } | ||
384 | } | ||
385 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
386 | seq_printf(seq, "\n MSR\t:\n"); | ||
387 | } | ||
388 | |||
389 | static int cpu_seq_show(struct seq_file *seq, void *v) | ||
390 | { | ||
391 | struct cpu_private *priv = seq->private; | ||
392 | |||
393 | if (priv == NULL) | ||
394 | return -EINVAL; | ||
395 | |||
396 | switch (cpu_base[priv->type].flag) { | ||
397 | case CPU_TSS: | ||
398 | smp_call_function_single(priv->cpu, print_tss, seq, 1); | ||
399 | break; | ||
400 | case CPU_CR: | ||
401 | smp_call_function_single(priv->cpu, print_cr, seq, 1); | ||
402 | break; | ||
403 | case CPU_DT: | ||
404 | smp_call_function_single(priv->cpu, print_dt, seq, 1); | ||
405 | break; | ||
406 | case CPU_DEBUG: | ||
407 | if (priv->file == CPU_INDEX_BIT) | ||
408 | smp_call_function_single(priv->cpu, print_dr, seq, 1); | ||
409 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
410 | break; | ||
411 | case CPU_APIC: | ||
412 | if (priv->file == CPU_INDEX_BIT) | ||
413 | smp_call_function_single(priv->cpu, print_apic, seq, 1); | ||
414 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
415 | break; | ||
416 | |||
417 | default: | ||
418 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
419 | break; | ||
420 | } | ||
421 | seq_printf(seq, "\n"); | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static void *cpu_seq_start(struct seq_file *seq, loff_t *pos) | ||
427 | { | ||
428 | if (*pos == 0) /* One time is enough ;-) */ | ||
429 | return seq; | ||
430 | |||
431 | return NULL; | ||
432 | } | ||
433 | |||
434 | static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
435 | { | ||
436 | (*pos)++; | ||
437 | |||
438 | return cpu_seq_start(seq, pos); | ||
439 | } | ||
440 | |||
441 | static void cpu_seq_stop(struct seq_file *seq, void *v) | ||
442 | { | ||
443 | } | ||
444 | |||
445 | static const struct seq_operations cpu_seq_ops = { | ||
446 | .start = cpu_seq_start, | ||
447 | .next = cpu_seq_next, | ||
448 | .stop = cpu_seq_stop, | ||
449 | .show = cpu_seq_show, | ||
450 | }; | ||
451 | |||
452 | static int cpu_seq_open(struct inode *inode, struct file *file) | ||
453 | { | ||
454 | struct cpu_private *priv = inode->i_private; | ||
455 | struct seq_file *seq; | ||
456 | int err; | ||
457 | |||
458 | err = seq_open(file, &cpu_seq_ops); | ||
459 | if (!err) { | ||
460 | seq = file->private_data; | ||
461 | seq->private = priv; | ||
462 | } | ||
463 | |||
464 | return err; | ||
465 | } | ||
466 | |||
467 | static int write_msr(struct cpu_private *priv, u64 val) | ||
468 | { | ||
469 | u32 low, high; | ||
470 | |||
471 | high = (val >> 32) & 0xffffffff; | ||
472 | low = val & 0xffffffff; | ||
473 | |||
474 | if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high)) | ||
475 | return 0; | ||
476 | |||
477 | return -EPERM; | ||
478 | } | ||
479 | |||
480 | static int write_cpu_register(struct cpu_private *priv, const char *buf) | ||
481 | { | ||
482 | int ret = -EPERM; | ||
483 | u64 val; | ||
484 | |||
485 | ret = strict_strtoull(buf, 0, &val); | ||
486 | if (ret < 0) | ||
487 | return ret; | ||
488 | |||
489 | /* Supporting only MSRs */ | ||
490 | if (priv->type < CPU_TSS_BIT) | ||
491 | return write_msr(priv, val); | ||
492 | |||
493 | return ret; | ||
494 | } | ||
495 | |||
496 | static ssize_t cpu_write(struct file *file, const char __user *ubuf, | ||
497 | size_t count, loff_t *off) | ||
498 | { | ||
499 | struct seq_file *seq = file->private_data; | ||
500 | struct cpu_private *priv = seq->private; | ||
501 | char buf[19]; | ||
502 | |||
503 | if ((priv == NULL) || (count >= sizeof(buf))) | ||
504 | return -EINVAL; | ||
505 | |||
506 | if (copy_from_user(&buf, ubuf, count)) | ||
507 | return -EFAULT; | ||
508 | |||
509 | buf[count] = 0; | ||
510 | |||
511 | if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write)) | ||
512 | if (!write_cpu_register(priv, buf)) | ||
513 | return count; | ||
514 | |||
515 | return -EACCES; | ||
516 | } | ||
517 | |||
518 | static const struct file_operations cpu_fops = { | ||
519 | .owner = THIS_MODULE, | ||
520 | .open = cpu_seq_open, | ||
521 | .read = seq_read, | ||
522 | .write = cpu_write, | ||
523 | .llseek = seq_lseek, | ||
524 | .release = seq_release, | ||
525 | }; | ||
526 | |||
527 | static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | ||
528 | unsigned file, struct dentry *dentry) | ||
529 | { | ||
530 | struct cpu_private *priv = NULL; | ||
531 | |||
532 | /* Already intialized */ | ||
533 | if (file == CPU_INDEX_BIT) | ||
534 | if (per_cpu(cpud_arr[type].init, cpu)) | ||
535 | return 0; | ||
536 | |||
537 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
538 | if (priv == NULL) | ||
539 | return -ENOMEM; | ||
540 | |||
541 | priv->cpu = cpu; | ||
542 | priv->type = type; | ||
543 | priv->reg = reg; | ||
544 | priv->file = file; | ||
545 | mutex_lock(&cpu_debug_lock); | ||
546 | per_cpu(cpud_priv_arr[type], cpu) = priv; | ||
547 | per_cpu(cpud_priv_count, cpu)++; | ||
548 | mutex_unlock(&cpu_debug_lock); | ||
549 | |||
550 | if (file) | ||
551 | debugfs_create_file(cpu_file[file].name, S_IRUGO, | ||
552 | dentry, (void *)priv, &cpu_fops); | ||
553 | else { | ||
554 | debugfs_create_file(cpu_base[type].name, S_IRUGO, | ||
555 | per_cpu(cpud_arr[type].dentry, cpu), | ||
556 | (void *)priv, &cpu_fops); | ||
557 | mutex_lock(&cpu_debug_lock); | ||
558 | per_cpu(cpud_arr[type].init, cpu) = 1; | ||
559 | mutex_unlock(&cpu_debug_lock); | ||
560 | } | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg, | ||
566 | struct dentry *dentry) | ||
567 | { | ||
568 | unsigned file; | ||
569 | int err = 0; | ||
570 | |||
571 | for (file = 0; file < ARRAY_SIZE(cpu_file); file++) { | ||
572 | err = cpu_create_file(cpu, type, reg, file, dentry); | ||
573 | if (err) | ||
574 | return err; | ||
575 | } | ||
576 | |||
577 | return err; | ||
578 | } | ||
579 | |||
580 | static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) | ||
581 | { | ||
582 | struct dentry *cpu_dentry = NULL; | ||
583 | unsigned reg, reg_min, reg_max; | ||
584 | int i, err = 0; | ||
585 | char reg_dir[12]; | ||
586 | u32 low, high; | ||
587 | |||
588 | for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
589 | if (!get_cpu_range(cpu, ®_min, ®_max, i, | ||
590 | cpu_base[type].flag)) | ||
591 | continue; | ||
592 | |||
593 | for (reg = reg_min; reg <= reg_max; reg++) { | ||
594 | if (rdmsr_safe_on_cpu(cpu, reg, &low, &high)) | ||
595 | continue; | ||
596 | |||
597 | sprintf(reg_dir, "0x%x", reg); | ||
598 | cpu_dentry = debugfs_create_dir(reg_dir, dentry); | ||
599 | err = cpu_init_regfiles(cpu, type, reg, cpu_dentry); | ||
600 | if (err) | ||
601 | return err; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | return err; | ||
606 | } | ||
607 | |||
608 | static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) | ||
609 | { | ||
610 | struct dentry *cpu_dentry = NULL; | ||
611 | unsigned type; | ||
612 | int err = 0; | ||
613 | |||
614 | for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) { | ||
615 | if (!is_typeflag_valid(cpu, cpu_base[type].flag)) | ||
616 | continue; | ||
617 | cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); | ||
618 | per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry; | ||
619 | |||
620 | if (type < CPU_TSS_BIT) | ||
621 | err = cpu_init_msr(cpu, type, cpu_dentry); | ||
622 | else | ||
623 | err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT, | ||
624 | cpu_dentry); | ||
625 | if (err) | ||
626 | return err; | ||
627 | } | ||
628 | |||
629 | return err; | ||
630 | } | ||
631 | |||
632 | static int cpu_init_cpu(void) | ||
633 | { | ||
634 | struct dentry *cpu_dentry = NULL; | ||
635 | struct cpuinfo_x86 *cpui; | ||
636 | char cpu_dir[12]; | ||
637 | unsigned cpu; | ||
638 | int err = 0; | ||
639 | |||
640 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | ||
641 | cpui = &cpu_data(cpu); | ||
642 | if (!cpu_has(cpui, X86_FEATURE_MSR)) | ||
643 | continue; | ||
644 | |||
645 | sprintf(cpu_dir, "cpu%d", cpu); | ||
646 | cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); | ||
647 | err = cpu_init_allreg(cpu, cpu_dentry); | ||
648 | |||
649 | pr_info("cpu%d(%d) debug files %d\n", | ||
650 | cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu)); | ||
651 | if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) { | ||
652 | pr_err("Register files count %d exceeds limit %d\n", | ||
653 | per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES); | ||
654 | per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES; | ||
655 | err = -ENFILE; | ||
656 | } | ||
657 | if (err) | ||
658 | return err; | ||
659 | } | ||
660 | |||
661 | return err; | ||
662 | } | ||
663 | |||
664 | static int __init cpu_debug_init(void) | ||
665 | { | ||
666 | cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir); | ||
667 | |||
668 | return cpu_init_cpu(); | ||
669 | } | ||
670 | |||
671 | static void __exit cpu_debug_exit(void) | ||
672 | { | ||
673 | int i, cpu; | ||
674 | |||
675 | if (cpu_debugfs_dir) | ||
676 | debugfs_remove_recursive(cpu_debugfs_dir); | ||
677 | |||
678 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | ||
679 | for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++) | ||
680 | kfree(per_cpu(cpud_priv_arr[i], cpu)); | ||
681 | } | ||
682 | |||
683 | module_init(cpu_debug_init); | ||
684 | module_exit(cpu_debug_exit); | ||
685 | |||
686 | MODULE_AUTHOR("Jaswinder Singh Rajput"); | ||
687 | MODULE_DESCRIPTION("CPU Debug module"); | ||
688 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | |||
1356 | 1356 | ||
1357 | kfree(data->powernow_table); | 1357 | kfree(data->powernow_table); |
1358 | kfree(data); | 1358 | kfree(data); |
1359 | per_cpu(powernow_data, pol->cpu) = NULL; | ||
1359 | 1360 | ||
1360 | return 0; | 1361 | return 0; |
1361 | } | 1362 | } |
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu) | |||
1375 | int err; | 1376 | int err; |
1376 | 1377 | ||
1377 | if (!data) | 1378 | if (!data) |
1378 | return -EINVAL; | 1379 | return 0; |
1379 | 1380 | ||
1380 | smp_call_function_single(cpu, query_values_on_cpu, &err, true); | 1381 | smp_call_function_single(cpu, query_values_on_cpu, &err, true); |
1381 | if (err) | 1382 | if (err) |
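The powernow-k8 changes close two holes: the per-CPU data pointer is cleared on ->exit so a later ->get for that CPU sees NULL instead of freed memory, and since a cpufreq ->get callback returns an unsigned frequency in kHz, the old return of -EINVAL would have been reported as a huge bogus frequency. A sketch of the intended behaviour (illustrative, not the driver's full code):

static unsigned int example_get(unsigned int cpu)
{
	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
	unsigned int khz = 0;

	if (!data)		/* ->exit already ran for this CPU */
		return 0;	/* 0 means "unknown"; a negative errno would
				 * be misread as a frequency near 4 GHz */

	/* ... query the hardware on 'cpu' and fill in khz ... */
	return khz;
}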
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index cb27fd6136c9..83e5e628de73 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -229,7 +229,7 @@ static void __exit cpuid_exit(void) | |||
229 | for_each_online_cpu(cpu) | 229 | for_each_online_cpu(cpu) |
230 | cpuid_device_destroy(cpu); | 230 | cpuid_device_destroy(cpu); |
231 | class_destroy(cpuid_class); | 231 | class_destroy(cpuid_class); |
232 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | 232 | __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); |
233 | unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); | 233 | unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); |
234 | } | 234 | } |
235 | 235 | ||
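The cpuid teardown now mirrors its registration: the driver claims NR_CPUS minors starting at 0 (presumably via __register_chrdev(), which is not shown in this diff), and plain unregister_chrdev() only releases the default 0..255 range. A sketch of the matched pair (illustrative):

/* registration side, assumed: claim one minor per possible CPU */
err = __register_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid", &cpuid_fops);

/* teardown must name the same base minor and count */
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");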
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ba6e65884603..ad80a1c718c6 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -34,6 +34,8 @@ | |||
34 | */ | 34 | */ |
35 | unsigned long hpet_address; | 35 | unsigned long hpet_address; |
36 | u8 hpet_blockid; /* OS timer block num */ | 36 | u8 hpet_blockid; /* OS timer block num */ |
37 | u8 hpet_msi_disable; | ||
38 | |||
37 | #ifdef CONFIG_PCI_MSI | 39 | #ifdef CONFIG_PCI_MSI |
38 | static unsigned long hpet_num_timers; | 40 | static unsigned long hpet_num_timers; |
39 | #endif | 41 | #endif |
@@ -596,6 +598,9 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) | |||
596 | unsigned int num_timers_used = 0; | 598 | unsigned int num_timers_used = 0; |
597 | int i; | 599 | int i; |
598 | 600 | ||
601 | if (hpet_msi_disable) | ||
602 | return; | ||
603 | |||
599 | if (boot_cpu_has(X86_FEATURE_ARAT)) | 604 | if (boot_cpu_has(X86_FEATURE_ARAT)) |
600 | return; | 605 | return; |
601 | id = hpet_readl(HPET_ID); | 606 | id = hpet_readl(HPET_ID); |
@@ -928,6 +933,9 @@ static __init int hpet_late_init(void) | |||
928 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | 933 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); |
929 | hpet_print_config(); | 934 | hpet_print_config(); |
930 | 935 | ||
936 | if (hpet_msi_disable) | ||
937 | return 0; | ||
938 | |||
931 | if (boot_cpu_has(X86_FEATURE_ARAT)) | 939 | if (boot_cpu_has(X86_FEATURE_ARAT)) |
932 | return 0; | 940 | return 0; |
933 | 941 | ||
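hpet_msi_disable lets the platform veto HPET MSI timers before they are probed or reserved; this diff only adds the flag and the early returns, and the code that actually sets it lives elsewhere (a chipset quirk or command-line hook, not shown here). A purely hypothetical example of a boot-parameter hook that could drive it (name illustrative):

static int __init example_nomsi_setup(char *str)
{
	hpet_msi_disable = 1;	/* skips hpet_msi_capability_lookup() and the
				 * MSI branch of hpet_late_init() */
	return 1;
}
__setup("hpet_nomsi", example_nomsi_setup);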
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index dd74fe7273b1..bfba6019d762 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/smp.h> | 43 | #include <linux/smp.h> |
44 | #include <linux/nmi.h> | 44 | #include <linux/nmi.h> |
45 | #include <linux/hw_breakpoint.h> | ||
45 | 46 | ||
46 | #include <asm/debugreg.h> | 47 | #include <asm/debugreg.h> |
47 | #include <asm/apicdef.h> | 48 | #include <asm/apicdef.h> |
@@ -204,40 +205,81 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
204 | 205 | ||
205 | static struct hw_breakpoint { | 206 | static struct hw_breakpoint { |
206 | unsigned enabled; | 207 | unsigned enabled; |
207 | unsigned type; | ||
208 | unsigned len; | ||
209 | unsigned long addr; | 208 | unsigned long addr; |
209 | int len; | ||
210 | int type; | ||
211 | struct perf_event **pev; | ||
210 | } breakinfo[4]; | 212 | } breakinfo[4]; |
211 | 213 | ||
212 | static void kgdb_correct_hw_break(void) | 214 | static void kgdb_correct_hw_break(void) |
213 | { | 215 | { |
214 | unsigned long dr7; | ||
215 | int correctit = 0; | ||
216 | int breakbit; | ||
217 | int breakno; | 216 | int breakno; |
218 | 217 | ||
219 | get_debugreg(dr7, 7); | ||
220 | for (breakno = 0; breakno < 4; breakno++) { | 218 | for (breakno = 0; breakno < 4; breakno++) { |
221 | breakbit = 2 << (breakno << 1); | 219 | struct perf_event *bp; |
222 | if (!(dr7 & breakbit) && breakinfo[breakno].enabled) { | 220 | struct arch_hw_breakpoint *info; |
223 | correctit = 1; | 221 | int val; |
224 | dr7 |= breakbit; | 222 | int cpu = raw_smp_processor_id(); |
225 | dr7 &= ~(0xf0000 << (breakno << 2)); | 223 | if (!breakinfo[breakno].enabled) |
226 | dr7 |= ((breakinfo[breakno].len << 2) | | 224 | continue; |
227 | breakinfo[breakno].type) << | 225 | bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); |
228 | ((breakno << 2) + 16); | 226 | info = counter_arch_bp(bp); |
229 | set_debugreg(breakinfo[breakno].addr, breakno); | 227 | if (bp->attr.disabled != 1) |
230 | 228 | continue; | |
231 | } else { | 229 | bp->attr.bp_addr = breakinfo[breakno].addr; |
232 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { | 230 | bp->attr.bp_len = breakinfo[breakno].len; |
233 | correctit = 1; | 231 | bp->attr.bp_type = breakinfo[breakno].type; |
234 | dr7 &= ~breakbit; | 232 | info->address = breakinfo[breakno].addr; |
235 | dr7 &= ~(0xf0000 << (breakno << 2)); | 233 | info->len = breakinfo[breakno].len; |
236 | } | 234 | info->type = breakinfo[breakno].type; |
237 | } | 235 | val = arch_install_hw_breakpoint(bp); |
236 | if (!val) | ||
237 | bp->attr.disabled = 0; | ||
238 | } | ||
239 | hw_breakpoint_restore(); | ||
240 | } | ||
241 | |||
242 | static int hw_break_reserve_slot(int breakno) | ||
243 | { | ||
244 | int cpu; | ||
245 | int cnt = 0; | ||
246 | struct perf_event **pevent; | ||
247 | |||
248 | for_each_online_cpu(cpu) { | ||
249 | cnt++; | ||
250 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | ||
251 | if (dbg_reserve_bp_slot(*pevent)) | ||
252 | goto fail; | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | |||
257 | fail: | ||
258 | for_each_online_cpu(cpu) { | ||
259 | cnt--; | ||
260 | if (!cnt) | ||
261 | break; | ||
262 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | ||
263 | dbg_release_bp_slot(*pevent); | ||
238 | } | 264 | } |
239 | if (correctit) | 265 | return -1; |
240 | set_debugreg(dr7, 7); | 266 | } |
267 | |||
268 | static int hw_break_release_slot(int breakno) | ||
269 | { | ||
270 | struct perf_event **pevent; | ||
271 | int cpu; | ||
272 | |||
273 | for_each_online_cpu(cpu) { | ||
274 | pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); | ||
275 | if (dbg_release_bp_slot(*pevent)) | ||
276 | /* | ||
277 | * The debugger is responisble for handing the retry on | ||
278 | * remove failure. | ||
279 | */ | ||
280 | return -1; | ||
281 | } | ||
282 | return 0; | ||
241 | } | 283 | } |
242 | 284 | ||
243 | static int | 285 | static int |
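The removed kgdb code programmed DR7 by hand; the magic numbers follow the debug-control register layout, which the perf-based replacement hides behind arch_install_hw_breakpoint(). A sketch of what the old expressions encoded (illustrative):

static unsigned long dr7_bits(int slot, unsigned int type, unsigned int len)
{
	unsigned long field = (len << 2) | type;	/* 2-bit len, 2-bit type */

	return (2UL << (slot << 1))		/* global-enable bit for this slot */
	     | (field << ((slot << 2) + 16));	/* per-slot control field starting */
						/* at bit 16 + 4*slot              */
}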
@@ -251,6 +293,10 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |||
251 | if (i == 4) | 293 | if (i == 4) |
252 | return -1; | 294 | return -1; |
253 | 295 | ||
296 | if (hw_break_release_slot(i)) { | ||
297 | printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); | ||
298 | return -1; | ||
299 | } | ||
254 | breakinfo[i].enabled = 0; | 300 | breakinfo[i].enabled = 0; |
255 | 301 | ||
256 | return 0; | 302 | return 0; |
@@ -259,15 +305,23 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |||
259 | static void kgdb_remove_all_hw_break(void) | 305 | static void kgdb_remove_all_hw_break(void) |
260 | { | 306 | { |
261 | int i; | 307 | int i; |
308 | int cpu = raw_smp_processor_id(); | ||
309 | struct perf_event *bp; | ||
262 | 310 | ||
263 | for (i = 0; i < 4; i++) | 311 | for (i = 0; i < 4; i++) { |
264 | memset(&breakinfo[i], 0, sizeof(struct hw_breakpoint)); | 312 | if (!breakinfo[i].enabled) |
313 | continue; | ||
314 | bp = *per_cpu_ptr(breakinfo[i].pev, cpu); | ||
315 | if (bp->attr.disabled == 1) | ||
316 | continue; | ||
317 | arch_uninstall_hw_breakpoint(bp); | ||
318 | bp->attr.disabled = 1; | ||
319 | } | ||
265 | } | 320 | } |
266 | 321 | ||
267 | static int | 322 | static int |
268 | kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | 323 | kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) |
269 | { | 324 | { |
270 | unsigned type; | ||
271 | int i; | 325 | int i; |
272 | 326 | ||
273 | for (i = 0; i < 4; i++) | 327 | for (i = 0; i < 4; i++) |
@@ -278,27 +332,42 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |||
278 | 332 | ||
279 | switch (bptype) { | 333 | switch (bptype) { |
280 | case BP_HARDWARE_BREAKPOINT: | 334 | case BP_HARDWARE_BREAKPOINT: |
281 | type = 0; | 335 | len = 1; |
282 | len = 1; | 336 | breakinfo[i].type = X86_BREAKPOINT_EXECUTE; |
283 | break; | 337 | break; |
284 | case BP_WRITE_WATCHPOINT: | 338 | case BP_WRITE_WATCHPOINT: |
285 | type = 1; | 339 | breakinfo[i].type = X86_BREAKPOINT_WRITE; |
286 | break; | 340 | break; |
287 | case BP_ACCESS_WATCHPOINT: | 341 | case BP_ACCESS_WATCHPOINT: |
288 | type = 3; | 342 | breakinfo[i].type = X86_BREAKPOINT_RW; |
289 | break; | 343 | break; |
290 | default: | 344 | default: |
291 | return -1; | 345 | return -1; |
292 | } | 346 | } |
293 | 347 | switch (len) { | |
294 | if (len == 1 || len == 2 || len == 4) | 348 | case 1: |
295 | breakinfo[i].len = len - 1; | 349 | breakinfo[i].len = X86_BREAKPOINT_LEN_1; |
296 | else | 350 | break; |
351 | case 2: | ||
352 | breakinfo[i].len = X86_BREAKPOINT_LEN_2; | ||
353 | break; | ||
354 | case 4: | ||
355 | breakinfo[i].len = X86_BREAKPOINT_LEN_4; | ||
356 | break; | ||
357 | #ifdef CONFIG_X86_64 | ||
358 | case 8: | ||
359 | breakinfo[i].len = X86_BREAKPOINT_LEN_8; | ||
360 | break; | ||
361 | #endif | ||
362 | default: | ||
297 | return -1; | 363 | return -1; |
298 | 364 | } | |
299 | breakinfo[i].enabled = 1; | ||
300 | breakinfo[i].addr = addr; | 365 | breakinfo[i].addr = addr; |
301 | breakinfo[i].type = type; | 366 | if (hw_break_reserve_slot(i)) { |
367 | breakinfo[i].addr = 0; | ||
368 | return -1; | ||
369 | } | ||
370 | breakinfo[i].enabled = 1; | ||
302 | 371 | ||
303 | return 0; | 372 | return 0; |
304 | } | 373 | } |
@@ -313,8 +382,21 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) | |||
313 | */ | 382 | */ |
314 | void kgdb_disable_hw_debug(struct pt_regs *regs) | 383 | void kgdb_disable_hw_debug(struct pt_regs *regs) |
315 | { | 384 | { |
385 | int i; | ||
386 | int cpu = raw_smp_processor_id(); | ||
387 | struct perf_event *bp; | ||
388 | |||
316 | /* Disable hardware debugging while we are in kgdb: */ | 389 | /* Disable hardware debugging while we are in kgdb: */ |
317 | set_debugreg(0UL, 7); | 390 | set_debugreg(0UL, 7); |
391 | for (i = 0; i < 4; i++) { | ||
392 | if (!breakinfo[i].enabled) | ||
393 | continue; | ||
394 | bp = *per_cpu_ptr(breakinfo[i].pev, cpu); | ||
395 | if (bp->attr.disabled == 1) | ||
396 | continue; | ||
397 | arch_uninstall_hw_breakpoint(bp); | ||
398 | bp->attr.disabled = 1; | ||
399 | } | ||
318 | } | 400 | } |
319 | 401 | ||
320 | /** | 402 | /** |
@@ -378,7 +460,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
378 | struct pt_regs *linux_regs) | 460 | struct pt_regs *linux_regs) |
379 | { | 461 | { |
380 | unsigned long addr; | 462 | unsigned long addr; |
381 | unsigned long dr6; | ||
382 | char *ptr; | 463 | char *ptr; |
383 | int newPC; | 464 | int newPC; |
384 | 465 | ||
@@ -404,20 +485,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
404 | raw_smp_processor_id()); | 485 | raw_smp_processor_id()); |
405 | } | 486 | } |
406 | 487 | ||
407 | get_debugreg(dr6, 6); | ||
408 | if (!(dr6 & 0x4000)) { | ||
409 | int breakno; | ||
410 | |||
411 | for (breakno = 0; breakno < 4; breakno++) { | ||
412 | if (dr6 & (1 << breakno) && | ||
413 | breakinfo[breakno].type == 0) { | ||
414 | /* Set restore flag: */ | ||
415 | linux_regs->flags |= X86_EFLAGS_RF; | ||
416 | break; | ||
417 | } | ||
418 | } | ||
419 | } | ||
420 | set_debugreg(0UL, 6); | ||
421 | kgdb_correct_hw_break(); | 488 | kgdb_correct_hw_break(); |
422 | 489 | ||
423 | return 0; | 490 | return 0; |
@@ -485,8 +552,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
485 | break; | 552 | break; |
486 | 553 | ||
487 | case DIE_DEBUG: | 554 | case DIE_DEBUG: |
488 | if (atomic_read(&kgdb_cpu_doing_single_step) == | 555 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
489 | raw_smp_processor_id()) { | ||
490 | if (user_mode(regs)) | 556 | if (user_mode(regs)) |
491 | return single_step_cont(regs, args); | 557 | return single_step_cont(regs, args); |
492 | break; | 558 | break; |
@@ -539,7 +605,42 @@ static struct notifier_block kgdb_notifier = { | |||
539 | */ | 605 | */ |
540 | int kgdb_arch_init(void) | 606 | int kgdb_arch_init(void) |
541 | { | 607 | { |
542 | return register_die_notifier(&kgdb_notifier); | 608 | int i, cpu; |
609 | int ret; | ||
610 | struct perf_event_attr attr; | ||
611 | struct perf_event **pevent; | ||
612 | |||
613 | ret = register_die_notifier(&kgdb_notifier); | ||
614 | if (ret != 0) | ||
615 | return ret; | ||
616 | /* | ||
617 | * Pre-allocate the hw breakpoint structures in the non-atomic | ||
618 | * portion of kgdb because this operation requires mutexes to | ||
619 | * complete. | ||
620 | */ | ||
621 | attr.bp_addr = (unsigned long)kgdb_arch_init; | ||
622 | attr.type = PERF_TYPE_BREAKPOINT; | ||
623 | attr.bp_len = HW_BREAKPOINT_LEN_1; | ||
624 | attr.bp_type = HW_BREAKPOINT_W; | ||
625 | attr.disabled = 1; | ||
626 | for (i = 0; i < 4; i++) { | ||
627 | breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); | ||
628 | if (IS_ERR(breakinfo[i].pev)) { | ||
629 | printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n"); | ||
630 | breakinfo[i].pev = NULL; | ||
631 | kgdb_arch_exit(); | ||
632 | return -1; | ||
633 | } | ||
634 | for_each_online_cpu(cpu) { | ||
635 | pevent = per_cpu_ptr(breakinfo[i].pev, cpu); | ||
636 | pevent[0]->hw.sample_period = 1; | ||
637 | if (pevent[0]->destroy != NULL) { | ||
638 | pevent[0]->destroy = NULL; | ||
639 | release_bp_slot(*pevent); | ||
640 | } | ||
641 | } | ||
642 | } | ||
643 | return ret; | ||
543 | } | 644 | } |
544 | 645 | ||
545 | /** | 646 | /** |
@@ -550,6 +651,13 @@ int kgdb_arch_init(void) | |||
550 | */ | 651 | */ |
551 | void kgdb_arch_exit(void) | 652 | void kgdb_arch_exit(void) |
552 | { | 653 | { |
654 | int i; | ||
655 | for (i = 0; i < 4; i++) { | ||
656 | if (breakinfo[i].pev) { | ||
657 | unregister_wide_hw_breakpoint(breakinfo[i].pev); | ||
658 | breakinfo[i].pev = NULL; | ||
659 | } | ||
660 | } | ||
553 | unregister_die_notifier(&kgdb_notifier); | 661 | unregister_die_notifier(&kgdb_notifier); |
554 | } | 662 | } |
555 | 663 | ||
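For reference, a minimal sketch (not part of this commit) of how a perf_event_attr for an in-kernel hardware breakpoint is typically prepared, assuming the 2.6.33-era hw_breakpoint_init() helper from <linux/hw_breakpoint.h> is available; kgdb_arch_init() above fills the attr fields by hand and passes a NULL trigger handler to register_wide_hw_breakpoint(). The sample_bp holder and init function are illustrative placeholders, not kernel code.

    #include <linux/err.h>
    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>

    static struct perf_event **sample_bp;	/* illustrative per-cpu event array */

    static int __init sample_bp_init(void)
    {
    	struct perf_event_attr attr;

    	hw_breakpoint_init(&attr);		/* zeroes attr, sets PERF_TYPE_BREAKPOINT defaults */
    	attr.bp_addr = (unsigned long)sample_bp_init;
    	attr.bp_len = HW_BREAKPOINT_LEN_1;
    	attr.bp_type = HW_BREAKPOINT_W;
    	attr.disabled = 1;			/* reserve the debug-register slot but do not arm it */

    	sample_bp = register_wide_hw_breakpoint(&attr, NULL);
    	if (IS_ERR(sample_bp))
    		return PTR_ERR(sample_bp);
    	return 0;
    }

Keeping the event disabled until the debugger actually needs the slot mirrors what kgdb_correct_hw_break() does above: the slot is reserved early (where mutexes may be taken) and only armed later from atomic context.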
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 37542b67c57e..e1af7c055c7d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -36,9 +36,6 @@ MODULE_LICENSE("GPL v2"); | |||
36 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 | 36 | #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 |
37 | #define UCODE_UCODE_TYPE 0x00000001 | 37 | #define UCODE_UCODE_TYPE 0x00000001 |
38 | 38 | ||
39 | const struct firmware *firmware; | ||
40 | static int supported_cpu; | ||
41 | |||
42 | struct equiv_cpu_entry { | 39 | struct equiv_cpu_entry { |
43 | u32 installed_cpu; | 40 | u32 installed_cpu; |
44 | u32 fixed_errata_mask; | 41 | u32 fixed_errata_mask; |
@@ -77,12 +74,15 @@ static struct equiv_cpu_entry *equiv_cpu_table; | |||
77 | 74 | ||
78 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | 75 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) |
79 | { | 76 | { |
77 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
80 | u32 dummy; | 78 | u32 dummy; |
81 | 79 | ||
82 | if (!supported_cpu) | ||
83 | return -1; | ||
84 | |||
85 | memset(csig, 0, sizeof(*csig)); | 80 | memset(csig, 0, sizeof(*csig)); |
81 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { | ||
82 | pr_warning("microcode: CPU%d: AMD CPU family 0x%x not " | ||
83 | "supported\n", cpu, c->x86); | ||
84 | return -1; | ||
85 | } | ||
86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); |
88 | return 0; | 88 | return 0; |
@@ -294,10 +294,14 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
294 | 294 | ||
295 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) | 295 | static enum ucode_state request_microcode_fw(int cpu, struct device *device) |
296 | { | 296 | { |
297 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
298 | const struct firmware *firmware; | ||
297 | enum ucode_state ret; | 299 | enum ucode_state ret; |
298 | 300 | ||
299 | if (firmware == NULL) | 301 | if (request_firmware(&firmware, fw_name, device)) { |
302 | printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); | ||
300 | return UCODE_NFOUND; | 303 | return UCODE_NFOUND; |
304 | } | ||
301 | 305 | ||
302 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 306 | if (*(u32 *)firmware->data != UCODE_MAGIC) { |
303 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", | 307 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", |
@@ -307,6 +311,8 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
307 | 311 | ||
308 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); | 312 | ret = generic_load_microcode(cpu, firmware->data, firmware->size); |
309 | 313 | ||
314 | release_firmware(firmware); | ||
315 | |||
310 | return ret; | 316 | return ret; |
311 | } | 317 | } |
312 | 318 | ||
@@ -325,31 +331,7 @@ static void microcode_fini_cpu_amd(int cpu) | |||
325 | uci->mc = NULL; | 331 | uci->mc = NULL; |
326 | } | 332 | } |
327 | 333 | ||
328 | void init_microcode_amd(struct device *device) | ||
329 | { | ||
330 | const char *fw_name = "amd-ucode/microcode_amd.bin"; | ||
331 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
332 | |||
333 | WARN_ON(c->x86_vendor != X86_VENDOR_AMD); | ||
334 | |||
335 | if (c->x86 < 0x10) { | ||
336 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); | ||
337 | return; | ||
338 | } | ||
339 | supported_cpu = 1; | ||
340 | |||
341 | if (request_firmware(&firmware, fw_name, device)) | ||
342 | pr_err("failed to load file %s\n", fw_name); | ||
343 | } | ||
344 | |||
345 | void fini_microcode_amd(void) | ||
346 | { | ||
347 | release_firmware(firmware); | ||
348 | } | ||
349 | |||
350 | static struct microcode_ops microcode_amd_ops = { | 334 | static struct microcode_ops microcode_amd_ops = { |
351 | .init = init_microcode_amd, | ||
352 | .fini = fini_microcode_amd, | ||
353 | .request_microcode_user = request_microcode_user, | 335 | .request_microcode_user = request_microcode_user, |
354 | .request_microcode_fw = request_microcode_fw, | 336 | .request_microcode_fw = request_microcode_fw, |
355 | .collect_cpu_info = collect_cpu_info_amd, | 337 | .collect_cpu_info = collect_cpu_info_amd, |
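For reference, a minimal sketch (not part of this commit) of the on-demand request_firmware()/release_firmware() pattern that request_microcode_fw() now follows, instead of caching the blob from an init hook; the file name and helper function are illustrative placeholders.

    #include <linux/device.h>
    #include <linux/firmware.h>

    static int load_example_blob(struct device *dev)
    {
    	const struct firmware *fw;
    	int ret;

    	ret = request_firmware(&fw, "example/blob.bin", dev);
    	if (ret)
    		return ret;		/* load failed: nothing to release */

    	/* ... validate and consume fw->data / fw->size here ... */

    	release_firmware(fw);		/* pair every successful request */
    	return 0;
    }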
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 0c8632433090..cceb5bc3c3c2 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -521,9 +521,6 @@ static int __init microcode_init(void) | |||
521 | return PTR_ERR(microcode_pdev); | 521 | return PTR_ERR(microcode_pdev); |
522 | } | 522 | } |
523 | 523 | ||
524 | if (microcode_ops->init) | ||
525 | microcode_ops->init(µcode_pdev->dev); | ||
526 | |||
527 | get_online_cpus(); | 524 | get_online_cpus(); |
528 | mutex_lock(µcode_mutex); | 525 | mutex_lock(µcode_mutex); |
529 | 526 | ||
@@ -566,9 +563,6 @@ static void __exit microcode_exit(void) | |||
566 | 563 | ||
567 | platform_device_unregister(microcode_pdev); | 564 | platform_device_unregister(microcode_pdev); |
568 | 565 | ||
569 | if (microcode_ops->fini) | ||
570 | microcode_ops->fini(); | ||
571 | |||
572 | microcode_ops = NULL; | 566 | microcode_ops = NULL; |
573 | 567 | ||
574 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); | 568 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 40b54ceb68b5..a2c1edd2d3ac 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) | |||
359 | x86_init.mpparse.mpc_record(1); | 359 | x86_init.mpparse.mpc_record(1); |
360 | } | 360 | } |
361 | 361 | ||
362 | #ifdef CONFIG_X86_BIGSMP | ||
363 | generic_bigsmp_probe(); | ||
364 | #endif | ||
365 | |||
366 | if (apic->setup_apic_routing) | ||
367 | apic->setup_apic_routing(); | ||
368 | |||
369 | if (!num_processors) | 362 | if (!num_processors) |
370 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); | 363 | printk(KERN_ERR "MPTABLE: no processors registered!\n"); |
371 | return num_processors; | 364 | return num_processors; |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 4bd93c9b2b27..206735ac8cbd 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -285,7 +285,7 @@ static void __exit msr_exit(void) | |||
285 | for_each_online_cpu(cpu) | 285 | for_each_online_cpu(cpu) |
286 | msr_device_destroy(cpu); | 286 | msr_device_destroy(cpu); |
287 | class_destroy(msr_class); | 287 | class_destroy(msr_class); |
288 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 288 | __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); |
289 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); | 289 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); |
290 | } | 290 | } |
291 | 291 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 02c3ee013ccd..c9b3522b6b46 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -115,18 +115,6 @@ void flush_thread(void) | |||
115 | { | 115 | { |
116 | struct task_struct *tsk = current; | 116 | struct task_struct *tsk = current; |
117 | 117 | ||
118 | #ifdef CONFIG_X86_64 | ||
119 | if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { | ||
120 | clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); | ||
121 | if (test_tsk_thread_flag(tsk, TIF_IA32)) { | ||
122 | clear_tsk_thread_flag(tsk, TIF_IA32); | ||
123 | } else { | ||
124 | set_tsk_thread_flag(tsk, TIF_IA32); | ||
125 | current_thread_info()->status |= TS_COMPAT; | ||
126 | } | ||
127 | } | ||
128 | #endif | ||
129 | |||
130 | flush_ptrace_hw_breakpoint(tsk); | 118 | flush_ptrace_hw_breakpoint(tsk); |
131 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | 119 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); |
132 | /* | 120 | /* |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index f9e033150cdf..126f0b493d04 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -521,6 +521,18 @@ void set_personality_64bit(void) | |||
521 | current->personality &= ~READ_IMPLIES_EXEC; | 521 | current->personality &= ~READ_IMPLIES_EXEC; |
522 | } | 522 | } |
523 | 523 | ||
524 | void set_personality_ia32(void) | ||
525 | { | ||
526 | /* inherit personality from parent */ | ||
527 | |||
528 | /* Make sure to be in 32bit mode */ | ||
529 | set_thread_flag(TIF_IA32); | ||
530 | current->personality |= force_personality32; | ||
531 | |||
532 | /* Prepare the first "return" to user space */ | ||
533 | current_thread_info()->status |= TS_COMPAT; | ||
534 | } | ||
535 | |||
524 | unsigned long get_wchan(struct task_struct *p) | 536 | unsigned long get_wchan(struct task_struct *p) |
525 | { | 537 | { |
526 | unsigned long stack; | 538 | unsigned long stack; |
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 18093d7498f0..12e9feaa2f7a 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -491,6 +491,19 @@ void force_hpet_resume(void) | |||
491 | break; | 491 | break; |
492 | } | 492 | } |
493 | } | 493 | } |
494 | |||
495 | /* | ||
496 | * HPET MSI on some boards (ATI SB700/SB800) has a side effect on | ||
497 | * floppy DMA. Disable HPET MSI on such platforms. | ||
498 | */ | ||
499 | static void force_disable_hpet_msi(struct pci_dev *unused) | ||
500 | { | ||
501 | hpet_msi_disable = 1; | ||
502 | } | ||
503 | |||
504 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, | ||
505 | force_disable_hpet_msi); | ||
506 | |||
494 | #endif | 507 | #endif |
495 | 508 | ||
496 | #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) | 509 | #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 1545bc0c9845..704bddcdf64d 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -203,6 +203,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
203 | DMI_MATCH(DMI_BOARD_NAME, "0T656F"), | 203 | DMI_MATCH(DMI_BOARD_NAME, "0T656F"), |
204 | }, | 204 | }, |
205 | }, | 205 | }, |
206 | { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G */ | ||
207 | .callback = set_bios_reboot, | ||
208 | .ident = "Dell OptiPlex 760", | ||
209 | .matches = { | ||
210 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
211 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), | ||
212 | DMI_MATCH(DMI_BOARD_NAME, "0G919G"), | ||
213 | }, | ||
214 | }, | ||
206 | { /* Handle problems with rebooting on Dell 2400's */ | 215 | { /* Handle problems with rebooting on Dell 2400's */ |
207 | .callback = set_bios_reboot, | 216 | .callback = set_bios_reboot, |
208 | .ident = "Dell PowerEdge 2400", | 217 | .ident = "Dell PowerEdge 2400", |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f7b8b9894b22..5d9e40c58628 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -642,19 +642,27 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { | |||
642 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), | 642 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), |
643 | }, | 643 | }, |
644 | }, | 644 | }, |
645 | { | ||
646 | /* | 645 | /* |
647 | * AMI BIOS with low memory corruption was found on Intel DG45ID board. | 646 | * AMI BIOS with low memory corruption was found on Intel DG45ID and |
648 | * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | 647 | * DG45FC boards. |
648 | * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | ||
649 | * match only DMI_BOARD_NAME and see if there is more bad products | 649 | * match only DMI_BOARD_NAME and see if there is more bad products |
650 | * with this vendor. | 650 | * with this vendor. |
651 | */ | 651 | */ |
652 | { | ||
652 | .callback = dmi_low_memory_corruption, | 653 | .callback = dmi_low_memory_corruption, |
653 | .ident = "AMI BIOS", | 654 | .ident = "AMI BIOS", |
654 | .matches = { | 655 | .matches = { |
655 | DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), | 656 | DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), |
656 | }, | 657 | }, |
657 | }, | 658 | }, |
659 | { | ||
660 | .callback = dmi_low_memory_corruption, | ||
661 | .ident = "AMI BIOS", | ||
662 | .matches = { | ||
663 | DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), | ||
664 | }, | ||
665 | }, | ||
658 | #endif | 666 | #endif |
659 | {} | 667 | {} |
660 | }; | 668 | }; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 678d0b8c26f3..b4e870cbdc60 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1083 | set_cpu_sibling_map(0); | 1083 | set_cpu_sibling_map(0); |
1084 | 1084 | ||
1085 | enable_IR_x2apic(); | 1085 | enable_IR_x2apic(); |
1086 | #ifdef CONFIG_X86_64 | ||
1087 | default_setup_apic_routing(); | 1086 | default_setup_apic_routing(); |
1088 | #endif | ||
1089 | 1087 | ||
1090 | if (smp_sanity_check(max_cpus) < 0) { | 1088 | if (smp_sanity_check(max_cpus) < 0) { |
1091 | printk(KERN_INFO "SMP disabled\n"); | 1089 | printk(KERN_INFO "SMP disabled\n"); |
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c index 3c84aa001c11..2b75ef638dbc 100644 --- a/arch/x86/kernel/uv_time.c +++ b/arch/x86/kernel/uv_time.c | |||
@@ -282,10 +282,21 @@ static int uv_rtc_unset_timer(int cpu, int force) | |||
282 | 282 | ||
283 | /* | 283 | /* |
284 | * Read the RTC. | 284 | * Read the RTC. |
285 | * | ||
286 | * Starting with HUB rev 2.0, the UV RTC register is replicated across all | ||
287 | * cachelines of its own page. This allows faster simultaneous reads | ||
288 | * from a given socket. | ||
285 | */ | 289 | */ |
286 | static cycle_t uv_read_rtc(struct clocksource *cs) | 290 | static cycle_t uv_read_rtc(struct clocksource *cs) |
287 | { | 291 | { |
288 | return (cycle_t)uv_read_local_mmr(UVH_RTC); | 292 | unsigned long offset; |
293 | |||
294 | if (uv_get_min_hub_revision_id() == 1) | ||
295 | offset = 0; | ||
296 | else | ||
297 | offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; | ||
298 | |||
299 | return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); | ||
289 | } | 300 | } |
290 | 301 | ||
291 | /* | 302 | /* |
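As a worked example of the new offset calculation (assuming the usual x86_64 values L1_CACHE_BYTES = 64 and PAGE_SIZE = 4096): blade processor n reads the replicated RTC at UVH_RTC + (n * 64) % 4096, so processors 0, 1, 2, ... hit byte offsets 0, 64, 128, ... and up to 64 concurrent readers on a socket touch distinct cachelines of the page before the offsets wrap.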
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 296aba49472a..15578f180e59 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this, | |||
467 | return -EOPNOTSUPP; | 467 | return -EOPNOTSUPP; |
468 | 468 | ||
469 | addr &= KVM_PIT_CHANNEL_MASK; | 469 | addr &= KVM_PIT_CHANNEL_MASK; |
470 | if (addr == 3) | ||
471 | return 0; | ||
472 | |||
470 | s = &pit_state->channels[addr]; | 473 | s = &pit_state->channels[addr]; |
471 | 474 | ||
472 | mutex_lock(&pit_state->lock); | 475 | mutex_lock(&pit_state->lock); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3063a0c4858b..ba8c045da782 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -373,6 +373,12 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
373 | if (unlikely(!apic_enabled(apic))) | 373 | if (unlikely(!apic_enabled(apic))) |
374 | break; | 374 | break; |
375 | 375 | ||
376 | if (trig_mode) { | ||
377 | apic_debug("level trig mode for vector %d", vector); | ||
378 | apic_set_vector(vector, apic->regs + APIC_TMR); | ||
379 | } else | ||
380 | apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
381 | |||
376 | result = !apic_test_and_set_irr(vector, apic); | 382 | result = !apic_test_and_set_irr(vector, apic); |
377 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, | 383 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, |
378 | trig_mode, vector, !result); | 384 | trig_mode, vector, !result); |
@@ -383,11 +389,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
383 | break; | 389 | break; |
384 | } | 390 | } |
385 | 391 | ||
386 | if (trig_mode) { | ||
387 | apic_debug("level trig mode for vector %d", vector); | ||
388 | apic_set_vector(vector, apic->regs + APIC_TMR); | ||
389 | } else | ||
390 | apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
391 | kvm_vcpu_kick(vcpu); | 392 | kvm_vcpu_kick(vcpu); |
392 | break; | 393 | break; |
393 | 394 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4c3e5b2314cb..89a49fb46a27 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn) | |||
477 | 477 | ||
478 | addr = gfn_to_hva(kvm, gfn); | 478 | addr = gfn_to_hva(kvm, gfn); |
479 | if (kvm_is_error_hva(addr)) | 479 | if (kvm_is_error_hva(addr)) |
480 | return page_size; | 480 | return PT_PAGE_TABLE_LEVEL; |
481 | 481 | ||
482 | down_read(¤t->mm->mmap_sem); | 482 | down_read(¤t->mm->mmap_sem); |
483 | vma = find_vma(current->mm, addr); | 483 | vma = find_vma(current->mm, addr); |
@@ -515,11 +515,9 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) | |||
515 | if (host_level == PT_PAGE_TABLE_LEVEL) | 515 | if (host_level == PT_PAGE_TABLE_LEVEL) |
516 | return host_level; | 516 | return host_level; |
517 | 517 | ||
518 | for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) { | 518 | for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) |
519 | |||
520 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) | 519 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) |
521 | break; | 520 | break; |
522 | } | ||
523 | 521 | ||
524 | return level - 1; | 522 | return level - 1; |
525 | } | 523 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 58a0f1e88596..ede2131a9225 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -150,7 +150,9 @@ walk: | |||
150 | walker->table_gfn[walker->level - 1] = table_gfn; | 150 | walker->table_gfn[walker->level - 1] = table_gfn; |
151 | walker->pte_gpa[walker->level - 1] = pte_gpa; | 151 | walker->pte_gpa[walker->level - 1] = pte_gpa; |
152 | 152 | ||
153 | kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); | 153 | if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) |
154 | goto not_present; | ||
155 | |||
154 | trace_kvm_mmu_paging_element(pte, walker->level); | 156 | trace_kvm_mmu_paging_element(pte, walker->level); |
155 | 157 | ||
156 | if (!is_present_gpte(pte)) | 158 | if (!is_present_gpte(pte)) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6651dbf58675..a1e1bc9d412d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) | |||
670 | { | 670 | { |
671 | static int version; | 671 | static int version; |
672 | struct pvclock_wall_clock wc; | 672 | struct pvclock_wall_clock wc; |
673 | struct timespec now, sys, boot; | 673 | struct timespec boot; |
674 | 674 | ||
675 | if (!wall_clock) | 675 | if (!wall_clock) |
676 | return; | 676 | return; |
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) | |||
685 | * wall clock specified here. guest system time equals host | 685 | * wall clock specified here. guest system time equals host |
686 | * system time for us, thus we must fill in host boot time here. | 686 | * system time for us, thus we must fill in host boot time here. |
687 | */ | 687 | */ |
688 | now = current_kernel_time(); | 688 | getboottime(&boot); |
689 | ktime_get_ts(&sys); | ||
690 | boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); | ||
691 | 689 | ||
692 | wc.sec = boot.tv_sec; | 690 | wc.sec = boot.tv_sec; |
693 | wc.nsec = boot.tv_nsec; | 691 | wc.nsec = boot.tv_nsec; |
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) | |||
762 | local_irq_save(flags); | 760 | local_irq_save(flags); |
763 | kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); | 761 | kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); |
764 | ktime_get_ts(&ts); | 762 | ktime_get_ts(&ts); |
763 | monotonic_to_bootbased(&ts); | ||
765 | local_irq_restore(flags); | 764 | local_irq_restore(flags); |
766 | 765 | ||
767 | /* With all the info we got, fill in the values */ | 766 | /* With all the info we got, fill in the values */ |
@@ -5072,12 +5071,13 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
5072 | GFP_KERNEL); | 5071 | GFP_KERNEL); |
5073 | if (!vcpu->arch.mce_banks) { | 5072 | if (!vcpu->arch.mce_banks) { |
5074 | r = -ENOMEM; | 5073 | r = -ENOMEM; |
5075 | goto fail_mmu_destroy; | 5074 | goto fail_free_lapic; |
5076 | } | 5075 | } |
5077 | vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; | 5076 | vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; |
5078 | 5077 | ||
5079 | return 0; | 5078 | return 0; |
5080 | 5079 | fail_free_lapic: | |
5080 | kvm_free_lapic(vcpu); | ||
5081 | fail_mmu_destroy: | 5081 | fail_mmu_destroy: |
5082 | kvm_mmu_destroy(vcpu); | 5082 | kvm_mmu_destroy(vcpu); |
5083 | fail_free_pio_data: | 5083 | fail_free_pio_data: |
@@ -5088,6 +5088,7 @@ fail: | |||
5088 | 5088 | ||
5089 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 5089 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
5090 | { | 5090 | { |
5091 | kfree(vcpu->arch.mce_banks); | ||
5091 | kvm_free_lapic(vcpu); | 5092 | kvm_free_lapic(vcpu); |
5092 | down_read(&vcpu->kvm->slots_lock); | 5093 | down_read(&vcpu->kvm->slots_lock); |
5093 | kvm_mmu_destroy(vcpu); | 5094 | kvm_mmu_destroy(vcpu); |
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 71da1bca13cb..738e6593799d 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep) | |||
18 | #else | 18 | #else |
19 | /* | 19 | /* |
20 | * With get_user_pages_fast, we walk down the pagetables without taking | 20 | * With get_user_pages_fast, we walk down the pagetables without taking |
21 | * any locks. For this we would like to load the pointers atoimcally, | 21 | * any locks. For this we would like to load the pointers atomically, |
22 | * but that is not possible (without expensive cmpxchg8b) on PAE. What | 22 | * but that is not possible (without expensive cmpxchg8b) on PAE. What |
23 | * we do have is the guarantee that a pte will only either go from not | 23 | * we do have is the guarantee that a pte will only either go from not |
24 | * present to present, or present to not present or both -- it will not | 24 | * present to present, or present to not present or both -- it will not |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 5198b9bb34ef..69ddfbd91135 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/numa.h> | 49 | #include <asm/numa.h> |
50 | #include <asm/cacheflush.h> | 50 | #include <asm/cacheflush.h> |
51 | #include <asm/init.h> | 51 | #include <asm/init.h> |
52 | #include <linux/bootmem.h> | ||
52 | 53 | ||
53 | static unsigned long dma_reserve __initdata; | 54 | static unsigned long dma_reserve __initdata; |
54 | 55 | ||
@@ -616,6 +617,21 @@ void __init paging_init(void) | |||
616 | */ | 617 | */ |
617 | #ifdef CONFIG_MEMORY_HOTPLUG | 618 | #ifdef CONFIG_MEMORY_HOTPLUG |
618 | /* | 619 | /* |
620 | * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need | ||
621 | * updating. | ||
622 | */ | ||
623 | static void update_end_of_memory_vars(u64 start, u64 size) | ||
624 | { | ||
625 | unsigned long end_pfn = PFN_UP(start + size); | ||
626 | |||
627 | if (end_pfn > max_pfn) { | ||
628 | max_pfn = end_pfn; | ||
629 | max_low_pfn = end_pfn; | ||
630 | high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; | ||
631 | } | ||
632 | } | ||
633 | |||
634 | /* | ||
619 | * Memory is added always to NORMAL zone. This means you will never get | 635 | * Memory is added always to NORMAL zone. This means you will never get |
620 | * additional DMA/DMA32 memory. | 636 | * additional DMA/DMA32 memory. |
621 | */ | 637 | */ |
@@ -634,6 +650,9 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
634 | ret = __add_pages(nid, zone, start_pfn, nr_pages); | 650 | ret = __add_pages(nid, zone, start_pfn, nr_pages); |
635 | WARN_ON_ONCE(ret); | 651 | WARN_ON_ONCE(ret); |
636 | 652 | ||
653 | /* update max_pfn, max_low_pfn and high_memory */ | ||
654 | update_end_of_memory_vars(start, size); | ||
655 | |||
637 | return ret; | 656 | return ret; |
638 | } | 657 | } |
639 | EXPORT_SYMBOL_GPL(arch_add_memory); | 658 | EXPORT_SYMBOL_GPL(arch_add_memory); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index a27124185fc1..28c68762648f 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -229,9 +229,11 @@ update_nodes_add(int node, unsigned long start, unsigned long end) | |||
229 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); | 229 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); |
230 | } | 230 | } |
231 | 231 | ||
232 | if (changed) | 232 | if (changed) { |
233 | node_set(node, cpu_nodes_parsed); | ||
233 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", | 234 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", |
234 | nd->start, nd->end); | 235 | nd->start, nd->end); |
236 | } | ||
235 | } | 237 | } |
236 | 238 | ||
237 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ | 239 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index cb88b1a0bd5f..3347f696edc7 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy) | |||
222 | 222 | ||
223 | /* move to next set */ | 223 | /* move to next set */ |
224 | si += model->num_counters; | 224 | si += model->num_counters; |
225 | if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) | 225 | if ((si >= model->num_virt_counters) || (counter_config[si].count == 0)) |
226 | per_cpu(switch_index, cpu) = 0; | 226 | per_cpu(switch_index, cpu) = 0; |
227 | else | 227 | else |
228 | per_cpu(switch_index, cpu) = si; | 228 | per_cpu(switch_index, cpu) = si; |
@@ -598,6 +598,7 @@ static int __init ppro_init(char **cpu_type) | |||
598 | case 15: case 23: | 598 | case 15: case 23: |
599 | *cpu_type = "i386/core_2"; | 599 | *cpu_type = "i386/core_2"; |
600 | break; | 600 | break; |
601 | case 0x2e: | ||
601 | case 26: | 602 | case 26: |
602 | spec = &op_arch_perfmon_spec; | 603 | spec = &op_arch_perfmon_spec; |
603 | *cpu_type = "i386/core_i7"; | 604 | *cpu_type = "i386/core_i7"; |
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index 564b008a51c7..39fba37f702f 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile | |||
@@ -15,7 +15,7 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o | |||
15 | 15 | ||
16 | obj-y += common.o early.o | 16 | obj-y += common.o early.o |
17 | obj-y += amd_bus.o | 17 | obj-y += amd_bus.o |
18 | obj-$(CONFIG_X86_64) += bus_numa.o intel_bus.o | 18 | obj-$(CONFIG_X86_64) += bus_numa.o |
19 | 19 | ||
20 | ifeq ($(CONFIG_PCI_DEBUG),y) | 20 | ifeq ($(CONFIG_PCI_DEBUG),y) |
21 | EXTRA_CFLAGS += -DDEBUG | 21 | EXTRA_CFLAGS += -DDEBUG |
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c deleted file mode 100644 index f81a2fa8fe25..000000000000 --- a/arch/x86/pci/intel_bus.c +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | /* | ||
2 | * to read io range from IOH pci conf, need to do it after mmconfig is there | ||
3 | */ | ||
4 | |||
5 | #include <linux/delay.h> | ||
6 | #include <linux/dmi.h> | ||
7 | #include <linux/pci.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <asm/pci_x86.h> | ||
10 | |||
11 | #include "bus_numa.h" | ||
12 | |||
13 | static inline void print_ioh_resources(struct pci_root_info *info) | ||
14 | { | ||
15 | int res_num; | ||
16 | int busnum; | ||
17 | int i; | ||
18 | |||
19 | printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n", | ||
20 | info->bus_min, info->bus_max); | ||
21 | res_num = info->res_num; | ||
22 | busnum = info->bus_min; | ||
23 | for (i = 0; i < res_num; i++) { | ||
24 | struct resource *res; | ||
25 | |||
26 | res = &info->res[i]; | ||
27 | printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n", | ||
28 | busnum, i, | ||
29 | (res->flags & IORESOURCE_IO) ? "io port" : | ||
30 | "mmio", | ||
31 | res->start, res->end); | ||
32 | } | ||
33 | } | ||
34 | |||
35 | #define IOH_LIO 0x108 | ||
36 | #define IOH_LMMIOL 0x10c | ||
37 | #define IOH_LMMIOH 0x110 | ||
38 | #define IOH_LMMIOH_BASEU 0x114 | ||
39 | #define IOH_LMMIOH_LIMITU 0x118 | ||
40 | #define IOH_LCFGBUS 0x11c | ||
41 | |||
42 | static void __devinit pci_root_bus_res(struct pci_dev *dev) | ||
43 | { | ||
44 | u16 word; | ||
45 | u32 dword; | ||
46 | struct pci_root_info *info; | ||
47 | u16 io_base, io_end; | ||
48 | u32 mmiol_base, mmiol_end; | ||
49 | u64 mmioh_base, mmioh_end; | ||
50 | int bus_base, bus_end; | ||
51 | |||
52 | /* some sys doesn't get mmconf enabled */ | ||
53 | if (dev->cfg_size < 0x120) | ||
54 | return; | ||
55 | |||
56 | if (pci_root_num >= PCI_ROOT_NR) { | ||
57 | printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n"); | ||
58 | return; | ||
59 | } | ||
60 | |||
61 | info = &pci_root_info[pci_root_num]; | ||
62 | pci_root_num++; | ||
63 | |||
64 | pci_read_config_word(dev, IOH_LCFGBUS, &word); | ||
65 | bus_base = (word & 0xff); | ||
66 | bus_end = (word & 0xff00) >> 8; | ||
67 | sprintf(info->name, "PCI Bus #%02x", bus_base); | ||
68 | info->bus_min = bus_base; | ||
69 | info->bus_max = bus_end; | ||
70 | |||
71 | pci_read_config_word(dev, IOH_LIO, &word); | ||
72 | io_base = (word & 0xf0) << (12 - 4); | ||
73 | io_end = (word & 0xf000) | 0xfff; | ||
74 | update_res(info, io_base, io_end, IORESOURCE_IO, 0); | ||
75 | |||
76 | pci_read_config_dword(dev, IOH_LMMIOL, &dword); | ||
77 | mmiol_base = (dword & 0xff00) << (24 - 8); | ||
78 | mmiol_end = (dword & 0xff000000) | 0xffffff; | ||
79 | update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0); | ||
80 | |||
81 | pci_read_config_dword(dev, IOH_LMMIOH, &dword); | ||
82 | mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10); | ||
83 | mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff); | ||
84 | pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword); | ||
85 | mmioh_base |= ((u64)(dword & 0x7ffff)) << 32; | ||
86 | pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword); | ||
87 | mmioh_end |= ((u64)(dword & 0x7ffff)) << 32; | ||
88 | update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0); | ||
89 | |||
90 | print_ioh_resources(info); | ||
91 | } | ||
92 | |||
93 | /* intel IOH */ | ||
94 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res); | ||