| author | Paul Mundt <lethal@linux-sh.org> | 2009-12-14 22:10:10 -0500 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-12-14 22:10:10 -0500 |
| commit | e0aa51f54faa0659b529143de6c608e76675326f (patch) | |
| tree | 22fc566b74bfe6bd612a858ba354818900cdc394 /arch/x86 | |
| parent | 9f815a1765b0ce766ab1d26ef192d30410f70b2b (diff) | |
| parent | 3ea6b3d0e6d0ffd91c0f8cadeb69b7133c038b32 (diff) | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86')
90 files changed, 1344 insertions, 1055 deletions
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 731318e5ac1d..bc01e3ebfeb2 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -187,8 +187,8 @@ config HAVE_MMIOTRACE_SUPPORT
 	def_bool y
 
 config X86_DECODER_SELFTEST
 	bool "x86 instruction decoder selftest"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && KPROBES
 	---help---
 	 Perform x86 instruction decoder selftests at build time.
 	 This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4eefdca9832b..53147ad85b96 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -696,7 +696,7 @@ ia32_sys_call_table:
 	.quad quiet_ni_syscall		/* streams2 */
 	.quad stub32_vfork		/* 190 */
 	.quad compat_sys_getrlimit
-	.quad sys32_mmap2
+	.quad sys_mmap_pgoff
 	.quad sys32_truncate64
 	.quad sys32_ftruncate64
 	.quad sys32_stat64		/* 195 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index df82c0e48ded..422572c77923 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -155,9 +155,6 @@ struct mmap_arg_struct {
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 {
 	struct mmap_arg_struct a;
-	struct file *file = NULL;
-	unsigned long retval;
-	struct mm_struct *mm ;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		file = fget(a.fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	mm = current->mm;
-	down_write(&mm->mmap_sem);
-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
-			       a.offset>>PAGE_SHIFT);
-	if (file)
-		fput(file);
-
-	up_write(&mm->mmap_sem);
-
-	return retval;
+	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			      a.offset>>PAGE_SHIFT);
 }
 
 asmlinkage long sys32_mprotect(unsigned long start, size_t len,
@@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
 	return ret;
 }
 
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
-			    unsigned long prot, unsigned long flags,
-			    unsigned long fd, unsigned long pgoff)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long error;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
 {
 	char *arch = "x86_64";
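Both hunks above funnel into one common helper. As a sketch of what the consolidated entry point does, reconstructed from the code being deleted here (the real sys_mmap_pgoff() was added to common mm code in this series, so treat this as an approximation rather than the authoritative body):

```c
/* Sketch only: pieced together from the removed sys32_mmap()/sys32_mmap2()
 * bodies above; the in-tree helper also covers details not shown here. */
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}
```

The payoff is visible in sys32_mmap() above: every architecture wrapper shrinks to argument massaging, with the fget/mmap_sem/fput dance kept in one place.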
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 84786fb9a23b..4d817f9e6e77 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -28,7 +28,9 @@ extern void amd_iommu_flush_all_domains(void);
 extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_apply_erratum_63(u16 devid);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
 #ifndef CONFIG_AMD_IOMMU_STATS
 
 static inline void amd_iommu_stats_init(void) { }
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6a635bd39867..4611f085cd43 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -113,7 +113,7 @@
  */
 #define LOCAL_PENDING_VECTOR		0xec
 
-#define UV_BAU_MESSAGE			0xec
+#define UV_BAU_MESSAGE			0xea
 
 /*
  * Self IPI vector for machine checks
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b14..2d228fc9b4b7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do {							\
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
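msrs_alloc()/msrs_free() pair with the bulk rdmsr_on_cpus()/wrmsr_on_cpus() helpers that live alongside these prototypes. A hypothetical caller could look like the following; that the allocation is per-CPU storage addressed with per_cpu_ptr() is an assumption from this patch series, so treat the access convention as illustrative:

```c
#include <asm/msr.h>

/* Hypothetical: read one MSR on all online CPUs with the bulk helpers. */
static void dump_msr_on_all_cpus(u32 msr_no)
{
	struct msr *msrs = msrs_alloc();	/* NULL on allocation failure */
	int cpu;

	if (!msrs)
		return;

	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	for_each_online_cpu(cpu) {
		struct msr *m = per_cpu_ptr(msrs, cpu);	/* assumed layout */

		pr_info("CPU%d: MSR 0x%08x = 0x%016llx\n", cpu, msr_no, m->q);
	}

	msrs_free(msrs);
}
```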
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988eee3a..b4bf9a942ed0 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -118,11 +118,27 @@ extern int __init pcibios_init(void);
 
 /* pci-mmconfig.c */
 
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+
+extern struct list_head pci_mmcfg_list;
 
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
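The old fixed array (pci_mmcfg_config/pci_mmcfg_config_num) becomes a linked list of pci_mmcfg_region entries, and PCI_MMCFG_BUS_OFFSET encodes the 1 MiB of config space each bus occupies (32 devices x 8 functions x 4 KiB). A plausible shape for the new lookup, shown only to make the interface concrete (the real body lives in pci-mmconfig.c):

```c
/* Sketch of pci_mmconfig_lookup(); assumes the list is maintained
 * (and serialized) by the mmconfig code itself. */
struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}
```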
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb7..0c44196b78ac 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)			\
 do {							\
-	typedef typeof(var) T__;			\
+	typedef typeof(var) pto_T__;			\
 	if (0) {					\
-		T__ tmp__;				\
-		tmp__ = (val);				\
+		pto_T__ pto_tmp__;			\
+		pto_tmp__ = (val);			\
 	}						\
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "qi" ((T__)(val)));		\
+		    : "qi" ((pto_T__)(val)));		\
 		break;					\
 	case 2:						\
 		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)(val)));		\
+		    : "ri" ((pto_T__)(val)));		\
 		break;					\
 	case 4:						\
 		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)(val)));		\
+		    : "ri" ((pto_T__)(val)));		\
 		break;					\
 	case 8:						\
 		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "re" ((T__)(val)));		\
+		    : "re" ((pto_T__)(val)));		\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
@@ -106,31 +106,31 @@ do {							\
 
 #define percpu_from_op(op, var, constraint)		\
 ({							\
-	typeof(var) ret__;				\
+	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=q" (ret__)			\
+		    : "=q" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 2:						\
 		asm(op "w "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 4:						\
 		asm(op "l "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 8:						\
 		asm(op "q "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
-	ret__;						\
+	pfo_ret__;					\
 })
@@ -153,6 +153,84 @@ do {							\
 #define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+
+#endif
+
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)			\
 ({									\
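These size-suffixed helpers are the x86 backends for the generic this_cpu_*() family: the segment-prefixed instruction emitted by percpu_to_op()/percpu_from_op() both locates the CPU-local slot and performs the operation, so no preempt_disable()/get_cpu_var() bracketing is needed around a single op. A minimal usage sketch with a made-up counter, in the same style as the nmi.c conversion later in this diff:

```c
/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(unsigned long, hits);

static void count_hit(void)
{
	/* Compiles to a single add on %gs-based memory, e.g.
	 * "addq $1, %gs:per_cpu__hits" on x86-64. */
	this_cpu_add(per_cpu_var(hits), 1);
}

static unsigned long read_hits_on_this_cpu(void)
{
	return this_cpu_read(per_cpu_var(hits));
}
```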
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 9af9decb38c3..4a5a089e1c62 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -57,9 +57,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
-			    unsigned long, unsigned long, unsigned long);
-
 struct oldold_utsname;
 struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 372b76edd63f..1bb6e395881c 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -55,8 +55,6 @@ struct sel_arg_struct;
 struct oldold_utsname;
 struct old_utsname;
 
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, unsigned long);
 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
 asmlinkage int old_select(struct sel_arg_struct __user *);
 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 022a84386de8..ecb544e65382 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 struct tss_struct;
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss);
+extern void show_regs_common(void);
 
 #ifdef CONFIG_X86_32
 
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 90f06c25221d..cb507bb05d79 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -16,7 +16,6 @@ extern unsigned long initial_code;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
-#define TRAMPOLINE_BASE 0x6000
 
 extern unsigned long setup_trampoline(void);
 extern void __init reserve_trampoline_memory(void);
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d5b7e90c0edf..396ff4cc8ed4 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,31 +37,4 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-enum xen_domain_type {
-	XEN_NATIVE,		/* running on bare hardware    */
-	XEN_PV_DOMAIN,		/* running in a PV domain      */
-	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain()	(xen_pv_domain() && \
-				 xen_start_info->flags & SIF_INITDOMAIN)
-#else  /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain()	(0)
-#endif	/* CONFIG_XEN_DOM0 */
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 1c0fb4d4ad55..b990b5cc9541 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -166,6 +166,43 @@ static void iommu_uninit_device(struct device *dev)
 {
 	kfree(dev->archdata.iommu);
 }
+
+void __init amd_iommu_uninit_devices(void)
+{
+	struct pci_dev *pdev = NULL;
+
+	for_each_pci_dev(pdev) {
+
+		if (!check_device(&pdev->dev))
+			continue;
+
+		iommu_uninit_device(&pdev->dev);
+	}
+}
+
+int __init amd_iommu_init_devices(void)
+{
+	struct pci_dev *pdev = NULL;
+	int ret = 0;
+
+	for_each_pci_dev(pdev) {
+
+		if (!check_device(&pdev->dev))
+			continue;
+
+		ret = iommu_init_device(&pdev->dev);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+
+	amd_iommu_uninit_devices();
+
+	return ret;
+}
 #ifdef CONFIG_AMD_IOMMU_STATS
 
 /*
@@ -1587,6 +1624,11 @@ static struct notifier_block device_nb = {
 	.notifier_call = device_change_notifier,
 };
 
+void amd_iommu_init_notifier(void)
+{
+	bus_register_notifier(&pci_bus_type, &device_nb);
+}
+
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -2145,8 +2187,6 @@ static void prealloc_protection_domains(void)
 		if (!check_device(&dev->dev))
 			continue;
 
-		iommu_init_device(&dev->dev);
-
 		/* Is there already any domain for it? */
 		if (domain_for_device(&dev->dev))
 			continue;
@@ -2215,8 +2255,6 @@ int __init amd_iommu_init_dma_ops(void)
 
 	register_iommu(&amd_iommu_ops);
 
-	bus_register_notifier(&pci_bus_type, &device_nb);
-
 	amd_iommu_stats_init();
 
 	return 0;
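With init_devices/uninit_devices factored out and amd_iommu_init_notifier() deferring bus_register_notifier() until initialization is complete, hotplug notifications can no longer hit a half-initialized driver. For reference, a PCI bus notifier follows the standard shape below; the handler body is illustrative, not the driver's actual device_change_notifier():

```c
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pci.h>

/* Illustrative handler: real drivers key off the action code. */
static int example_device_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct device *dev = data;

	dev_dbg(dev, "bus notification, action %lu\n", action);

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		/* set up per-device state, as iommu_init_device() does */
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		/* tear that state down again */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_device_notifier,
};

static void example_register(void)
{
	bus_register_notifier(&pci_bus_type, &example_nb);
}
```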
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 7ffc39965233..1dca9c34eaeb 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1274,6 +1274,10 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	ret = amd_iommu_init_devices();
+	if (ret)
+		goto free;
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1281,6 +1285,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	amd_iommu_init_notifier();
+
 	enable_iommus();
 
 	if (iommu_pass_through)
@@ -1296,6 +1302,9 @@ out:
 	return ret;
 
 free:
+
+	amd_iommu_uninit_devices();
+
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));
 
@@ -1336,6 +1345,9 @@ void __init amd_iommu_detect(void)
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index e0dfb6856aa2..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
 	 * or BIOS forget to put that in reserved.
 	 * try to update e820 to make that region as reserved.
 	 */
-	int i, fix, slot;
+	u32 agp_aper_base = 0, agp_aper_order = 0;
+	int i, fix, slot, valid_agp = 0;
 	u32 ctl;
 	u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
 	u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
 		return;
 
 	/* This is mostly duplicate of iommu_hole_init */
+	agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
+
 	fix = 0;
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
 		}
 	}
 
-	if (!fix)
+	if (valid_agp)
 		return;
 
-	/* different nodes have different setting, disable them all at first*/
+	/* disable them all at first */
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
 
 	if (aper_alloc) {
 		/* Got the aperture from the AGP bridge */
-	} else if (!valid_agp) {
-		/* Do nothing */
 	} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
 		   force_iommu ||
 		   valid_agp ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index efb2b9cd132c..aa57c079c98f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
 
 	rdmsr(MSR_IA32_APICBASE, msr, msr2);
 	if (!(msr & X2APIC_ENABLE)) {
-		pr_info("Enabling x2apic\n");
+		printk_once(KERN_INFO "Enabling x2apic\n");
 		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
 	}
 }
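enable_x2apic() runs on every CPU being brought up, so the unconditional pr_info() printed once per processor; printk_once() keeps only the first occurrence, part of the same "limit processor bootup messages" cleanup visible throughout this merge. Approximately, it is a printk guarded by a function-local static flag; the sketch is named my_printk_once to avoid implying the exact kernel.h definition:

```c
/* Approximation of printk_once() from this era's <linux/kernel.h>. */
#define my_printk_once(x...) ({			\
	static bool __print_once;		\
						\
	if (!__print_once) {			\
		__print_once = true;		\
		printk(x);			\
	}					\
})
```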
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index d9acc3bee0f4..e31b9ffe25f5 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -127,7 +127,7 @@ static u32 noop_apic_read(u32 reg)
 
 static void noop_apic_write(u32 reg, u32 v)
 {
-	WARN_ON_ONCE((cpu_has_apic || !disable_apic));
+	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
 struct apic apic_noop = {
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index e85f8fb7f8e7..dd2b5f264643 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -27,6 +27,9 @@
  *
  * http://www.unisys.com
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
@@ -223,9 +226,9 @@ static int parse_unisys_oem(char *oemptr)
 			mip_addr = val;
 			mip = (struct mip_reg *)val;
 			mip_reg = __va(mip);
-			pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
+			pr_debug("host_reg = 0x%lx\n",
 				 (unsigned long)host_reg);
-			pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
+			pr_debug("mip_reg = 0x%lx\n",
 				 (unsigned long)mip_reg);
 			success++;
 			break;
@@ -401,7 +404,7 @@ static void es7000_enable_apic_mode(void)
 	if (!es7000_plat)
 		return;
 
-	printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
+	pr_info("Enabling APIC mode.\n");
 	memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
 	es7000_mip_reg.off_0x00 = MIP_SW_APIC;
 	es7000_mip_reg.off_0x38 = MIP_VALID;
@@ -514,8 +517,7 @@ static void es7000_setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 
-	printk(KERN_INFO
-	"Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
+	pr_info("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
 		"Physical Cluster" : "Logical Cluster",
 		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9dbf..0159a69396cb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
 			/*
 			 * die_nmi will return ONLY if NOTIFY_STOP happens..
 			 */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 			regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 
 	/* see if the nmi watchdog went off */
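This is the first user of the raw percpu ops added in the percpu.h hunk above: because each op is a single %gs-relative read-modify-write instruction on x86, a plain long replaces local_t for this per-CPU counter with no loss of safety in NMI context. A contrast sketch with hypothetical counters:

```c
#include <asm/local.h>

/* Old style: local_t plus the address-of __get_cpu_var() dance. */
static DEFINE_PER_CPU(local_t, old_ctr);

/* New style: a plain scalar driven by the raw this_cpu ops. */
static DEFINE_PER_CPU(long, new_ctr);

static void bump_both(void)
{
	local_inc(&__get_cpu_var(old_ctr));
	__this_cpu_inc(per_cpu_var(new_ctr));	/* one instruction on x86 */
}
```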
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e5212714..468489b57aae 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+	static bool printed;
 
 	if (c->cpuid_level < 0xb)
 		return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
-
-	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-	       c->phys_proc_id);
-	if (c->x86_max_cores > 1)
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-		       c->cpu_core_id);
+	if (!printed) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+		printed = 1;
+	}
 	return;
 #endif
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799cec..8dc3ea145c97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 			node = nearby_node(apicid);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c8..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
+	static bool printed;
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
 		goto out;
 	}
 
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 					       ((1 << core_bits) - 1);
 
 out:
-	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
+		printed = 1;
 	}
 #endif
 }
@@ -1093,7 +1095,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-	struct orig_ist *orig_ist;
+	struct orig_ist *oist;
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
@@ -1102,7 +1104,7 @@ void __cpuinit cpu_init(void)
 
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
-	orig_ist = &per_cpu(orig_ist, cpu);
+	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
@@ -1143,12 +1145,12 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!orig_ist->ist[0]) {
+	if (!oist->ist[0]) {
 		char *estacks = per_cpu(exception_stacks, cpu);
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
-			orig_ist->ist[v] = t->x86_tss.ist[v] =
+			oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
 	/* Already intialized */
 	if (file == CPU_INDEX_BIT)
-		if (per_cpu(cpu_arr[type].init, cpu))
+		if (per_cpu(cpud_arr[type].init, cpu))
 			return 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 	priv->reg = reg;
 	priv->file = file;
 	mutex_lock(&cpu_debug_lock);
-	per_cpu(priv_arr[type], cpu) = priv;
-	per_cpu(cpu_priv_count, cpu)++;
+	per_cpu(cpud_priv_arr[type], cpu) = priv;
+	per_cpu(cpud_priv_count, cpu)++;
 	mutex_unlock(&cpu_debug_lock);
 
 	if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 				dentry, (void *)priv, &cpu_fops);
 	else {
 		debugfs_create_file(cpu_base[type].name, S_IRUGO,
-				    per_cpu(cpu_arr[type].dentry, cpu),
+				    per_cpu(cpud_arr[type].dentry, cpu),
 				    (void *)priv, &cpu_fops);
 		mutex_lock(&cpu_debug_lock);
-		per_cpu(cpu_arr[type].init, cpu) = 1;
+		per_cpu(cpud_arr[type].init, cpu) = 1;
 		mutex_unlock(&cpu_debug_lock);
 	}
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
 		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
 			continue;
 		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+		per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
 		if (type < CPU_TSS_BIT)
 			err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
 		err = cpu_init_allreg(cpu, cpu_dentry);
 
 		pr_info("cpu%d(%d) debug files %d\n",
-			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+			cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+		if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
 			pr_err("Register files count %d exceeds limit %d\n",
-				per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+				per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+			per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
 			err = -ENFILE;
 		}
 		if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
 		debugfs_remove_recursive(cpu_debugfs_dir);
 
 	for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
-		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-			kfree(per_cpu(priv_arr[i], cpu));
+		for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+			kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 8b581d3905cb..f28decf8dde3 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
| @@ -68,9 +68,9 @@ struct acpi_cpufreq_data { | |||
| 68 | unsigned int cpu_feature; | 68 | unsigned int cpu_feature; |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); | 71 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); |
| 72 | 72 | ||
| 73 | static DEFINE_PER_CPU(struct aperfmperf, old_perf); | 73 | static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); |
| 74 | 74 | ||
| 75 | /* acpi_perf_data is a pointer to percpu data. */ | 75 | /* acpi_perf_data is a pointer to percpu data. */ |
| 76 | static struct acpi_processor_performance *acpi_perf_data; | 76 | static struct acpi_processor_performance *acpi_perf_data; |
| @@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
| 214 | if (unlikely(cpumask_empty(mask))) | 214 | if (unlikely(cpumask_empty(mask))) |
| 215 | return 0; | 215 | return 0; |
| 216 | 216 | ||
| 217 | switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { | 217 | switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { |
| 218 | case SYSTEM_INTEL_MSR_CAPABLE: | 218 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 219 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | 219 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
| 220 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; | 220 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; |
| 221 | break; | 221 | break; |
| 222 | case SYSTEM_IO_CAPABLE: | 222 | case SYSTEM_IO_CAPABLE: |
| 223 | cmd.type = SYSTEM_IO_CAPABLE; | 223 | cmd.type = SYSTEM_IO_CAPABLE; |
| 224 | perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; | 224 | perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; |
| 225 | cmd.addr.io.port = perf->control_register.address; | 225 | cmd.addr.io.port = perf->control_register.address; |
| 226 | cmd.addr.io.bit_width = perf->control_register.bit_width; | 226 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
| 227 | break; | 227 | break; |
| @@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
| 268 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) | 268 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) |
| 269 | return 0; | 269 | return 0; |
| 270 | 270 | ||
| 271 | ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf); | 271 | ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); |
| 272 | per_cpu(old_perf, cpu) = perf; | 272 | per_cpu(acfreq_old_perf, cpu) = perf; |
| 273 | 273 | ||
| 274 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; | 274 | retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; |
| 275 | 275 | ||
| @@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, | |||
| 278 | 278 | ||
| 279 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 279 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
| 280 | { | 280 | { |
| 281 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); | 281 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); |
| 282 | unsigned int freq; | 282 | unsigned int freq; |
| 283 | unsigned int cached_freq; | 283 | unsigned int cached_freq; |
| 284 | 284 | ||
| @@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, | |||
| 322 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | 322 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
| 323 | unsigned int target_freq, unsigned int relation) | 323 | unsigned int target_freq, unsigned int relation) |
| 324 | { | 324 | { |
| 325 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 325 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 326 | struct acpi_processor_performance *perf; | 326 | struct acpi_processor_performance *perf; |
| 327 | struct cpufreq_freqs freqs; | 327 | struct cpufreq_freqs freqs; |
| 328 | struct drv_cmd cmd; | 328 | struct drv_cmd cmd; |
| @@ -416,7 +416,7 @@ out: | |||
| 416 | 416 | ||
| 417 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | 417 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) |
| 418 | { | 418 | { |
| 419 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 419 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 420 | 420 | ||
| 421 | dprintk("acpi_cpufreq_verify\n"); | 421 | dprintk("acpi_cpufreq_verify\n"); |
| 422 | 422 | ||
| @@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 574 | return -ENOMEM; | 574 | return -ENOMEM; |
| 575 | 575 | ||
| 576 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); | 576 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
| 577 | per_cpu(drv_data, cpu) = data; | 577 | per_cpu(acfreq_data, cpu) = data; |
| 578 | 578 | ||
| 579 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | 579 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
| 580 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | 580 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; |
| @@ -725,20 +725,20 @@ err_unreg: | |||
| 725 | acpi_processor_unregister_performance(perf, cpu); | 725 | acpi_processor_unregister_performance(perf, cpu); |
| 726 | err_free: | 726 | err_free: |
| 727 | kfree(data); | 727 | kfree(data); |
| 728 | per_cpu(drv_data, cpu) = NULL; | 728 | per_cpu(acfreq_data, cpu) = NULL; |
| 729 | 729 | ||
| 730 | return result; | 730 | return result; |
| 731 | } | 731 | } |
| 732 | 732 | ||
| 733 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | 733 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
| 734 | { | 734 | { |
| 735 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 735 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 736 | 736 | ||
| 737 | dprintk("acpi_cpufreq_cpu_exit\n"); | 737 | dprintk("acpi_cpufreq_cpu_exit\n"); |
| 738 | 738 | ||
| 739 | if (data) { | 739 | if (data) { |
| 740 | cpufreq_frequency_table_put_attr(policy->cpu); | 740 | cpufreq_frequency_table_put_attr(policy->cpu); |
| 741 | per_cpu(drv_data, policy->cpu) = NULL; | 741 | per_cpu(acfreq_data, policy->cpu) = NULL; |
| 742 | acpi_processor_unregister_performance(data->acpi_data, | 742 | acpi_processor_unregister_performance(data->acpi_data, |
| 743 | policy->cpu); | 743 | policy->cpu); |
| 744 | kfree(data); | 744 | kfree(data); |
| @@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
| 749 | 749 | ||
| 750 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | 750 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) |
| 751 | { | 751 | { |
| 752 | struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); | 752 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 753 | 753 | ||
| 754 | dprintk("acpi_cpufreq_resume\n"); | 754 | dprintk("acpi_cpufreq_resume\n"); |
| 755 | 755 | ||
| @@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = { | |||
| 764 | }; | 764 | }; |
| 765 | 765 | ||
| 766 | static struct cpufreq_driver acpi_cpufreq_driver = { | 766 | static struct cpufreq_driver acpi_cpufreq_driver = { |
| 767 | .verify = acpi_cpufreq_verify, | 767 | .verify = acpi_cpufreq_verify, |
| 768 | .target = acpi_cpufreq_target, | 768 | .target = acpi_cpufreq_target, |
| 769 | .init = acpi_cpufreq_cpu_init, | 769 | .bios_limit = acpi_processor_get_bios_limit, |
| 770 | .exit = acpi_cpufreq_cpu_exit, | 770 | .init = acpi_cpufreq_cpu_init, |
| 771 | .resume = acpi_cpufreq_resume, | 771 | .exit = acpi_cpufreq_cpu_exit, |
| 772 | .name = "acpi-cpufreq", | 772 | .resume = acpi_cpufreq_resume, |
| 773 | .owner = THIS_MODULE, | 773 | .name = "acpi-cpufreq", |
| 774 | .attr = acpi_cpufreq_attr, | 774 | .owner = THIS_MODULE, |
| 775 | .attr = acpi_cpufreq_attr, | ||
| 775 | }; | 776 | }; |
| 776 | 777 | ||
| 777 | static int __init acpi_cpufreq_init(void) | 778 | static int __init acpi_cpufreq_init(void) |
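Note on the hunks above: the drv_data -> acfreq_data rename (and the matching ici_* and cpu_ds_* renames later in this diff) follows from per-CPU variables now living in the ordinary symbol namespace, where generic names collide across compilation units; each subsystem therefore prefixes its own. The new .bios_limit callback is discussed after the powernow-k7 hunk below. A minimal sketch of the prefixed per-CPU pattern, assuming kernel context (the accessor function is illustrative, not from the driver):

    #include <linux/percpu.h>

    struct acpi_cpufreq_data;   /* driver-private, defined elsewhere */

    /* prefixed name avoids clashing with any other per-CPU "drv_data" */
    static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

    static struct acpi_cpufreq_data *acfreq_get(unsigned int cpu)
    {
            return per_cpu(acfreq_data, cpu);   /* slot for 'cpu' */
    }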
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c index f10dea409f40..cb01dac267d3 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c | |||
| @@ -164,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) | |||
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | /* cpuinfo and default policy values */ | 166 | /* cpuinfo and default policy values */ |
| 167 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 167 | policy->cpuinfo.transition_latency = 200000; |
| 168 | policy->cur = busfreq * max_multiplier; | 168 | policy->cur = busfreq * max_multiplier; |
| 169 | 169 | ||
| 170 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); | 170 | result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); |
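The powernow-k6 change above swaps CPUFREQ_ETERNAL, which advertises an unknown transition latency, for a concrete figure; dynamic governors such as ondemand refuse drivers with unknown latency, so this makes them usable on K6. A sketch of the init fragment, assuming the cpufreq core's nanosecond convention for this field:

    /* transition_latency is in nanoseconds: 200000 ns == 200 us.
     * A finite value (vs CPUFREQ_ETERNAL, "unknown") is required
     * before ondemand will drive this CPU. */
    policy->cpuinfo.transition_latency = 200000;
    policy->cur = busfreq * max_multiplier;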
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index d47c775eb0ab..9a97116f89e5 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c | |||
| @@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = { | |||
| 714 | }; | 714 | }; |
| 715 | 715 | ||
| 716 | static struct cpufreq_driver powernow_driver = { | 716 | static struct cpufreq_driver powernow_driver = { |
| 717 | .verify = powernow_verify, | 717 | .verify = powernow_verify, |
| 718 | .target = powernow_target, | 718 | .target = powernow_target, |
| 719 | .get = powernow_get, | 719 | .get = powernow_get, |
| 720 | .init = powernow_cpu_init, | 720 | #ifdef CONFIG_X86_POWERNOW_K7_ACPI |
| 721 | .exit = powernow_cpu_exit, | 721 | .bios_limit = acpi_processor_get_bios_limit, |
| 722 | .name = "powernow-k7", | 722 | #endif |
| 723 | .owner = THIS_MODULE, | 723 | .init = powernow_cpu_init, |
| 724 | .attr = powernow_table_attr, | 724 | .exit = powernow_cpu_exit, |
| 725 | .name = "powernow-k7", | ||
| 726 | .owner = THIS_MODULE, | ||
| 727 | .attr = powernow_table_attr, | ||
| 725 | }; | 728 | }; |
| 726 | 729 | ||
| 727 | static int __init powernow_init(void) | 730 | static int __init powernow_init(void) |
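The .bios_limit hook added here (under CONFIG_X86_POWERNOW_K7_ACPI, since it needs ACPI), in acpi-cpufreq above, and in powernow-k8 below lets the cpufreq core report a BIOS-imposed frequency cap through sysfs instead of silently clipping the policy. A hedged usage sketch; the prototype is assumed from how these drivers wire the callback, not shown in this diff:

    unsigned int limit;     /* kHz */

    /* assumed: returns 0 on success and fills in the cap the BIOS
     * currently enforces for this CPU */
    if (!acpi_processor_get_bios_limit(cpu, &limit))
            pr_info("BIOS limits CPU%d to %u kHz\n", cpu, limit);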
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 3f12dabeab52..a9df9441a9a2 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -1118,7 +1118,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, | |||
| 1118 | static int powernowk8_target(struct cpufreq_policy *pol, | 1118 | static int powernowk8_target(struct cpufreq_policy *pol, |
| 1119 | unsigned targfreq, unsigned relation) | 1119 | unsigned targfreq, unsigned relation) |
| 1120 | { | 1120 | { |
| 1121 | cpumask_t oldmask; | 1121 | cpumask_var_t oldmask; |
| 1122 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 1122 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
| 1123 | u32 checkfid; | 1123 | u32 checkfid; |
| 1124 | u32 checkvid; | 1124 | u32 checkvid; |
| @@ -1131,9 +1131,13 @@ static int powernowk8_target(struct cpufreq_policy *pol, | |||
| 1131 | checkfid = data->currfid; | 1131 | checkfid = data->currfid; |
| 1132 | checkvid = data->currvid; | 1132 | checkvid = data->currvid; |
| 1133 | 1133 | ||
| 1134 | /* only run on specific CPU from here on */ | 1134 | /* only run on specific CPU from here on. */ |
| 1135 | oldmask = current->cpus_allowed; | 1135 | /* This is poor form: use a workqueue or smp_call_function_single */ |
| 1136 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); | 1136 | if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) |
| 1137 | return -ENOMEM; | ||
| 1138 | |||
| 1139 | cpumask_copy(oldmask, tsk_cpumask(current)); | ||
| 1140 | set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); | ||
| 1137 | 1141 | ||
| 1138 | if (smp_processor_id() != pol->cpu) { | 1142 | if (smp_processor_id() != pol->cpu) { |
| 1139 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | 1143 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
| @@ -1193,7 +1197,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, | |||
| 1193 | ret = 0; | 1197 | ret = 0; |
| 1194 | 1198 | ||
| 1195 | err_out: | 1199 | err_out: |
| 1196 | set_cpus_allowed_ptr(current, &oldmask); | 1200 | set_cpus_allowed_ptr(current, oldmask); |
| 1201 | free_cpumask_var(oldmask); | ||
| 1197 | return ret; | 1202 | return ret; |
| 1198 | } | 1203 | } |
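The cpumask_t -> cpumask_var_t conversion in powernowk8_target() above avoids placing an NR_CPUS-bit mask on the kernel stack: with CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated, otherwise cpumask_var_t degrades to an on-stack array and alloc/free become no-ops. A minimal sketch of the save/migrate/restore pattern, assuming tsk_cpumask() expands to the task's allowed-CPUs mask as the hunk uses it:

    #include <linux/cpumask.h>

    static int run_on_cpu(int cpu)
    {
            cpumask_var_t oldmask;

            if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(oldmask, tsk_cpumask(current));    /* save */
            set_cpus_allowed_ptr(current, cpumask_of(cpu)); /* migrate */

            /* ... work that must execute on 'cpu' ... */

            set_cpus_allowed_ptr(current, oldmask);         /* restore */
            free_cpumask_var(oldmask);
            return 0;
    }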
| 1199 | 1204 | ||
| @@ -1393,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = { | |||
| 1393 | }; | 1398 | }; |
| 1394 | 1399 | ||
| 1395 | static struct cpufreq_driver cpufreq_amd64_driver = { | 1400 | static struct cpufreq_driver cpufreq_amd64_driver = { |
| 1396 | .verify = powernowk8_verify, | 1401 | .verify = powernowk8_verify, |
| 1397 | .target = powernowk8_target, | 1402 | .target = powernowk8_target, |
| 1398 | .init = powernowk8_cpu_init, | 1403 | .bios_limit = acpi_processor_get_bios_limit, |
| 1399 | .exit = __devexit_p(powernowk8_cpu_exit), | 1404 | .init = powernowk8_cpu_init, |
| 1400 | .get = powernowk8_get, | 1405 | .exit = __devexit_p(powernowk8_cpu_exit), |
| 1401 | .name = "powernow-k8", | 1406 | .get = powernowk8_get, |
| 1402 | .owner = THIS_MODULE, | 1407 | .name = "powernow-k8", |
| 1403 | .attr = powernow_k8_attr, | 1408 | .owner = THIS_MODULE, |
| 1409 | .attr = powernow_k8_attr, | ||
| 1404 | }; | 1410 | }; |
| 1405 | 1411 | ||
| 1406 | /* driver entry point for init */ | 1412 | /* driver entry point for init */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 3ae5a7a3a500..2ce8e0b5cc54 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
| @@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev; | |||
| 39 | 39 | ||
| 40 | /* speedstep_processor | 40 | /* speedstep_processor |
| 41 | */ | 41 | */ |
| 42 | static unsigned int speedstep_processor; | 42 | static enum speedstep_processor speedstep_processor; |
| 43 | 43 | ||
| 44 | static u32 pmbase; | 44 | static u32 pmbase; |
| 45 | 45 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index f4c290b8482f..ad0083abfa23 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | |||
| @@ -34,7 +34,7 @@ static int relaxed_check; | |||
| 34 | * GET PROCESSOR CORE SPEED IN KHZ * | 34 | * GET PROCESSOR CORE SPEED IN KHZ * |
| 35 | *********************************************************************/ | 35 | *********************************************************************/ |
| 36 | 36 | ||
| 37 | static unsigned int pentium3_get_frequency(unsigned int processor) | 37 | static unsigned int pentium3_get_frequency(enum speedstep_processor processor) |
| 38 | { | 38 | { |
| 39 | /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ | 39 | /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ |
| 40 | struct { | 40 | struct { |
| @@ -227,7 +227,7 @@ static unsigned int pentium4_get_frequency(void) | |||
| 227 | 227 | ||
| 228 | 228 | ||
| 229 | /* Warning: may get called from smp_call_function_single. */ | 229 | /* Warning: may get called from smp_call_function_single. */ |
| 230 | unsigned int speedstep_get_frequency(unsigned int processor) | 230 | unsigned int speedstep_get_frequency(enum speedstep_processor processor) |
| 231 | { | 231 | { |
| 232 | switch (processor) { | 232 | switch (processor) { |
| 233 | case SPEEDSTEP_CPU_PCORE: | 233 | case SPEEDSTEP_CPU_PCORE: |
| @@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor); | |||
| 380 | * DETECT SPEEDSTEP SPEEDS * | 380 | * DETECT SPEEDSTEP SPEEDS * |
| 381 | *********************************************************************/ | 381 | *********************************************************************/ |
| 382 | 382 | ||
| 383 | unsigned int speedstep_get_freqs(unsigned int processor, | 383 | unsigned int speedstep_get_freqs(enum speedstep_processor processor, |
| 384 | unsigned int *low_speed, | 384 | unsigned int *low_speed, |
| 385 | unsigned int *high_speed, | 385 | unsigned int *high_speed, |
| 386 | unsigned int *transition_latency, | 386 | unsigned int *transition_latency, |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h index 2b6c04e5a304..70d9cea1219d 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h | |||
| @@ -11,18 +11,18 @@ | |||
| 11 | 11 | ||
| 12 | 12 | ||
| 13 | /* processors */ | 13 | /* processors */ |
| 14 | 14 | enum speedstep_processor { | |
| 15 | #define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */ | 15 | SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */ |
| 16 | #define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */ | 16 | SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */ |
| 17 | #define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */ | 17 | SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */ |
| 18 | #define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */ | 18 | SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */ |
| 19 | |||
| 20 | /* the following processors are not speedstep-capable and are not auto-detected | 19 | /* the following processors are not speedstep-capable and are not auto-detected |
| 21 | * in speedstep_detect_processor(). However, their speed can be detected using | 20 | * in speedstep_detect_processor(). However, their speed can be detected using |
| 22 | * the speedstep_get_frequency() call. */ | 21 | * the speedstep_get_frequency() call. */ |
| 23 | #define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */ | 22 | SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */ |
| 24 | #define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */ | 23 | SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */ |
| 25 | #define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */ | 24 | SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */ |
| 25 | }; | ||
| 26 | 26 | ||
| 27 | /* speedstep states -- only two of them */ | 27 | /* speedstep states -- only two of them */ |
| 28 | 28 | ||
| @@ -31,10 +31,10 @@ | |||
| 31 | 31 | ||
| 32 | 32 | ||
| 33 | /* detect a speedstep-capable processor */ | 33 | /* detect a speedstep-capable processor */ |
| 34 | extern unsigned int speedstep_detect_processor (void); | 34 | extern enum speedstep_processor speedstep_detect_processor(void); |
| 35 | 35 | ||
| 36 | /* detect the current speed (in khz) of the processor */ | 36 | /* detect the current speed (in khz) of the processor */ |
| 37 | extern unsigned int speedstep_get_frequency(unsigned int processor); | 37 | extern unsigned int speedstep_get_frequency(enum speedstep_processor processor); |
| 38 | 38 | ||
| 39 | 39 | ||
| 40 | /* detect the low and high speeds of the processor. The callback | 40 | /* detect the low and high speeds of the processor. The callback |
| @@ -42,7 +42,7 @@ extern unsigned int speedstep_get_frequency(unsigned int processor); | |||
| 42 | * SPEEDSTEP_LOW; the second argument is zero so that no | 42 | * SPEEDSTEP_LOW; the second argument is zero so that no |
| 43 | * cpufreq_notify_transition calls are initiated. | 43 | * cpufreq_notify_transition calls are initiated. |
| 44 | */ | 44 | */ |
| 45 | extern unsigned int speedstep_get_freqs(unsigned int processor, | 45 | extern unsigned int speedstep_get_freqs(enum speedstep_processor processor, |
| 46 | unsigned int *low_speed, | 46 | unsigned int *low_speed, |
| 47 | unsigned int *high_speed, | 47 | unsigned int *high_speed, |
| 48 | unsigned int *transition_latency, | 48 | unsigned int *transition_latency, |
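Turning the SPEEDSTEP_CPU_* #defines into enum speedstep_processor (used through speedstep-ich.c, speedstep-lib.c and speedstep-smi.c in this diff) changes nothing at runtime, since the values are identical, but buys compile-time type checking: the prototypes above can no longer be handed an arbitrary unsigned int without a diagnostic. Sketch of the general technique, with hypothetical names:

    /* before: untyped constants, any integer silently accepted */
    #define WIDGET_A 0x01

    /* after: the parameter type documents and checks intent */
    enum widget_kind { WIDGET_KIND_A = 0x01, WIDGET_KIND_B = 0x02 };

    unsigned int widget_speed(enum widget_kind kind);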
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index befea088e4f5..04d73c114e49 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | |||
| @@ -35,7 +35,7 @@ static int smi_cmd; | |||
| 35 | static unsigned int smi_sig; | 35 | static unsigned int smi_sig; |
| 36 | 36 | ||
| 37 | /* info about the processor */ | 37 | /* info about the processor */ |
| 38 | static unsigned int speedstep_processor; | 38 | static enum speedstep_processor speedstep_processor; |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
| 41 | * There are only two frequency states for each processor. Values | 41 | * There are only two frequency states for each processor. Values |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index c900b73f9224..9c31e8b09d2c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
| 270 | node = cpu_to_node(cpu); | 270 | node = cpu_to_node(cpu); |
| 271 | } | 271 | } |
| 272 | numa_set_node(cpu, node); | 272 | numa_set_node(cpu, node); |
| 273 | |||
| 274 | printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); | ||
| 275 | #endif | 273 | #endif |
| 276 | } | 274 | } |
| 277 | 275 | ||
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 6c40f6b5b340..fc6c8ef92dcc 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
| @@ -499,26 +499,27 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 499 | #ifdef CONFIG_SYSFS | 499 | #ifdef CONFIG_SYSFS |
| 500 | 500 | ||
| 501 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 501 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
| 502 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 502 | static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); |
| 503 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 503 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) |
| 504 | 504 | ||
| 505 | #ifdef CONFIG_SMP | 505 | #ifdef CONFIG_SMP |
| 506 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 506 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
| 507 | { | 507 | { |
| 508 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 508 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
| 509 | unsigned long num_threads_sharing; | 509 | unsigned long num_threads_sharing; |
| 510 | int index_msb, i; | 510 | int index_msb, i, sibling; |
| 511 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 511 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 512 | 512 | ||
| 513 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | 513 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { |
| 514 | struct cpuinfo_x86 *d; | 514 | for_each_cpu(i, c->llc_shared_map) { |
| 515 | for_each_online_cpu(i) { | 515 | if (!per_cpu(ici_cpuid4_info, i)) |
| 516 | if (!per_cpu(cpuid4_info, i)) | ||
| 517 | continue; | 516 | continue; |
| 518 | d = &cpu_data(i); | ||
| 519 | this_leaf = CPUID4_INFO_IDX(i, index); | 517 | this_leaf = CPUID4_INFO_IDX(i, index); |
| 520 | cpumask_copy(to_cpumask(this_leaf->shared_cpu_map), | 518 | for_each_cpu(sibling, c->llc_shared_map) { |
| 521 | d->llc_shared_map); | 519 | if (!cpu_online(sibling)) |
| 520 | continue; | ||
| 521 | set_bit(sibling, this_leaf->shared_cpu_map); | ||
| 522 | } | ||
| 522 | } | 523 | } |
| 523 | return; | 524 | return; |
| 524 | } | 525 | } |
| @@ -535,7 +536,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
| 535 | c->apicid >> index_msb) { | 536 | c->apicid >> index_msb) { |
| 536 | cpumask_set_cpu(i, | 537 | cpumask_set_cpu(i, |
| 537 | to_cpumask(this_leaf->shared_cpu_map)); | 538 | to_cpumask(this_leaf->shared_cpu_map)); |
| 538 | if (i != cpu && per_cpu(cpuid4_info, i)) { | 539 | if (i != cpu && per_cpu(ici_cpuid4_info, i)) { |
| 539 | sibling_leaf = | 540 | sibling_leaf = |
| 540 | CPUID4_INFO_IDX(i, index); | 541 | CPUID4_INFO_IDX(i, index); |
| 541 | cpumask_set_cpu(cpu, to_cpumask( | 542 | cpumask_set_cpu(cpu, to_cpumask( |
| @@ -574,8 +575,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
| 574 | for (i = 0; i < num_cache_leaves; i++) | 575 | for (i = 0; i < num_cache_leaves; i++) |
| 575 | cache_remove_shared_cpu_map(cpu, i); | 576 | cache_remove_shared_cpu_map(cpu, i); |
| 576 | 577 | ||
| 577 | kfree(per_cpu(cpuid4_info, cpu)); | 578 | kfree(per_cpu(ici_cpuid4_info, cpu)); |
| 578 | per_cpu(cpuid4_info, cpu) = NULL; | 579 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
| 579 | } | 580 | } |
| 580 | 581 | ||
| 581 | static int | 582 | static int |
| @@ -614,15 +615,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
| 614 | if (num_cache_leaves == 0) | 615 | if (num_cache_leaves == 0) |
| 615 | return -ENOENT; | 616 | return -ENOENT; |
| 616 | 617 | ||
| 617 | per_cpu(cpuid4_info, cpu) = kzalloc( | 618 | per_cpu(ici_cpuid4_info, cpu) = kzalloc( |
| 618 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); | 619 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); |
| 619 | if (per_cpu(cpuid4_info, cpu) == NULL) | 620 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) |
| 620 | return -ENOMEM; | 621 | return -ENOMEM; |
| 621 | 622 | ||
| 622 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); | 623 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); |
| 623 | if (retval) { | 624 | if (retval) { |
| 624 | kfree(per_cpu(cpuid4_info, cpu)); | 625 | kfree(per_cpu(ici_cpuid4_info, cpu)); |
| 625 | per_cpu(cpuid4_info, cpu) = NULL; | 626 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
| 626 | } | 627 | } |
| 627 | 628 | ||
| 628 | return retval; | 629 | return retval; |
| @@ -634,7 +635,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
| 634 | extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ | 635 | extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ |
| 635 | 636 | ||
| 636 | /* pointer to kobject for cpuX/cache */ | 637 | /* pointer to kobject for cpuX/cache */ |
| 637 | static DEFINE_PER_CPU(struct kobject *, cache_kobject); | 638 | static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); |
| 638 | 639 | ||
| 639 | struct _index_kobject { | 640 | struct _index_kobject { |
| 640 | struct kobject kobj; | 641 | struct kobject kobj; |
| @@ -643,8 +644,8 @@ struct _index_kobject { | |||
| 643 | }; | 644 | }; |
| 644 | 645 | ||
| 645 | /* pointer to array of kobjects for cpuX/cache/indexY */ | 646 | /* pointer to array of kobjects for cpuX/cache/indexY */ |
| 646 | static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | 647 | static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); |
| 647 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y])) | 648 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) |
| 648 | 649 | ||
| 649 | #define show_one_plus(file_name, object, val) \ | 650 | #define show_one_plus(file_name, object, val) \ |
| 650 | static ssize_t show_##file_name \ | 651 | static ssize_t show_##file_name \ |
| @@ -863,10 +864,10 @@ static struct kobj_type ktype_percpu_entry = { | |||
| 863 | 864 | ||
| 864 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | 865 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) |
| 865 | { | 866 | { |
| 866 | kfree(per_cpu(cache_kobject, cpu)); | 867 | kfree(per_cpu(ici_cache_kobject, cpu)); |
| 867 | kfree(per_cpu(index_kobject, cpu)); | 868 | kfree(per_cpu(ici_index_kobject, cpu)); |
| 868 | per_cpu(cache_kobject, cpu) = NULL; | 869 | per_cpu(ici_cache_kobject, cpu) = NULL; |
| 869 | per_cpu(index_kobject, cpu) = NULL; | 870 | per_cpu(ici_index_kobject, cpu) = NULL; |
| 870 | free_cache_attributes(cpu); | 871 | free_cache_attributes(cpu); |
| 871 | } | 872 | } |
| 872 | 873 | ||
| @@ -882,14 +883,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
| 882 | return err; | 883 | return err; |
| 883 | 884 | ||
| 884 | /* Allocate all required memory */ | 885 | /* Allocate all required memory */ |
| 885 | per_cpu(cache_kobject, cpu) = | 886 | per_cpu(ici_cache_kobject, cpu) = |
| 886 | kzalloc(sizeof(struct kobject), GFP_KERNEL); | 887 | kzalloc(sizeof(struct kobject), GFP_KERNEL); |
| 887 | if (unlikely(per_cpu(cache_kobject, cpu) == NULL)) | 888 | if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) |
| 888 | goto err_out; | 889 | goto err_out; |
| 889 | 890 | ||
| 890 | per_cpu(index_kobject, cpu) = kzalloc( | 891 | per_cpu(ici_index_kobject, cpu) = kzalloc( |
| 891 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); | 892 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
| 892 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 893 | if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) |
| 893 | goto err_out; | 894 | goto err_out; |
| 894 | 895 | ||
| 895 | return 0; | 896 | return 0; |
| @@ -913,7 +914,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
| 913 | if (unlikely(retval < 0)) | 914 | if (unlikely(retval < 0)) |
| 914 | return retval; | 915 | return retval; |
| 915 | 916 | ||
| 916 | retval = kobject_init_and_add(per_cpu(cache_kobject, cpu), | 917 | retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), |
| 917 | &ktype_percpu_entry, | 918 | &ktype_percpu_entry, |
| 918 | &sys_dev->kobj, "%s", "cache"); | 919 | &sys_dev->kobj, "%s", "cache"); |
| 919 | if (retval < 0) { | 920 | if (retval < 0) { |
| @@ -927,12 +928,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
| 927 | this_object->index = i; | 928 | this_object->index = i; |
| 928 | retval = kobject_init_and_add(&(this_object->kobj), | 929 | retval = kobject_init_and_add(&(this_object->kobj), |
| 929 | &ktype_cache, | 930 | &ktype_cache, |
| 930 | per_cpu(cache_kobject, cpu), | 931 | per_cpu(ici_cache_kobject, cpu), |
| 931 | "index%1lu", i); | 932 | "index%1lu", i); |
| 932 | if (unlikely(retval)) { | 933 | if (unlikely(retval)) { |
| 933 | for (j = 0; j < i; j++) | 934 | for (j = 0; j < i; j++) |
| 934 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); | 935 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
| 935 | kobject_put(per_cpu(cache_kobject, cpu)); | 936 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
| 936 | cpuid4_cache_sysfs_exit(cpu); | 937 | cpuid4_cache_sysfs_exit(cpu); |
| 937 | return retval; | 938 | return retval; |
| 938 | } | 939 | } |
| @@ -940,7 +941,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
| 940 | } | 941 | } |
| 941 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); | 942 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); |
| 942 | 943 | ||
| 943 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); | 944 | kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); |
| 944 | return 0; | 945 | return 0; |
| 945 | } | 946 | } |
| 946 | 947 | ||
| @@ -949,7 +950,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
| 949 | unsigned int cpu = sys_dev->id; | 950 | unsigned int cpu = sys_dev->id; |
| 950 | unsigned long i; | 951 | unsigned long i; |
| 951 | 952 | ||
| 952 | if (per_cpu(cpuid4_info, cpu) == NULL) | 953 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) |
| 953 | return; | 954 | return; |
| 954 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) | 955 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) |
| 955 | return; | 956 | return; |
| @@ -957,7 +958,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
| 957 | 958 | ||
| 958 | for (i = 0; i < num_cache_leaves; i++) | 959 | for (i = 0; i < num_cache_leaves; i++) |
| 959 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); | 960 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
| 960 | kobject_put(per_cpu(cache_kobject, cpu)); | 961 | kobject_put(per_cpu(ici_cache_kobject, cpu)); |
| 961 | cpuid4_cache_sysfs_exit(cpu); | 962 | cpuid4_cache_sysfs_exit(cpu); |
| 962 | } | 963 | } |
| 963 | 964 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index d7ebf25d10ed..a8aacd4b513c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
| @@ -1388,13 +1388,14 @@ static void __mcheck_cpu_init_timer(void) | |||
| 1388 | struct timer_list *t = &__get_cpu_var(mce_timer); | 1388 | struct timer_list *t = &__get_cpu_var(mce_timer); |
| 1389 | int *n = &__get_cpu_var(mce_next_interval); | 1389 | int *n = &__get_cpu_var(mce_next_interval); |
| 1390 | 1390 | ||
| 1391 | setup_timer(t, mce_start_timer, smp_processor_id()); | ||
| 1392 | |||
| 1391 | if (mce_ignore_ce) | 1393 | if (mce_ignore_ce) |
| 1392 | return; | 1394 | return; |
| 1393 | 1395 | ||
| 1394 | *n = check_interval * HZ; | 1396 | *n = check_interval * HZ; |
| 1395 | if (!*n) | 1397 | if (!*n) |
| 1396 | return; | 1398 | return; |
| 1397 | setup_timer(t, mce_start_timer, smp_processor_id()); | ||
| 1398 | t->expires = round_jiffies(jiffies + *n); | 1399 | t->expires = round_jiffies(jiffies + *n); |
| 1399 | add_timer_on(t, smp_processor_id()); | 1400 | add_timer_on(t, smp_processor_id()); |
| 1400 | } | 1401 | } |
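Hoisting setup_timer() above the mce_ignore_ce and zero-interval bail-outs guarantees the per-CPU timer is always initialized, so teardown paths can delete it unconditionally; previously those early returns could leave it uninitialized. The hunk that follows fixes the sysfs error-unwind loop, which was removing the per-bank attributes instead of the mce_attrs[] entries it had actually added. A sketch of the init-then-maybe-arm pattern:

    setup_timer(t, mce_start_timer, smp_processor_id()); /* always init */

    if (mce_ignore_ce || !interval)
            return;   /* initialized but never armed: deletion stays safe */

    t->expires = round_jiffies(jiffies + interval);
    add_timer_on(t, smp_processor_id());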
| @@ -1928,7 +1929,7 @@ error2: | |||
| 1928 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); | 1929 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr); |
| 1929 | error: | 1930 | error: |
| 1930 | while (--i >= 0) | 1931 | while (--i >= 0) |
| 1931 | sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr); | 1932 | sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); |
| 1932 | 1933 | ||
| 1933 | sysdev_unregister(&per_cpu(mce_dev, cpu)); | 1934 | sysdev_unregister(&per_cpu(mce_dev, cpu)); |
| 1934 | 1935 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 4fef985fc221..81c499eceb21 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
| @@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) | |||
| 256 | ack_APIC_irq(); | 256 | ack_APIC_irq(); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | /* Thermal monitoring depends on APIC, ACPI and clock modulation */ | ||
| 260 | static int intel_thermal_supported(struct cpuinfo_x86 *c) | ||
| 261 | { | ||
| 262 | if (!cpu_has_apic) | ||
| 263 | return 0; | ||
| 264 | if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) | ||
| 265 | return 0; | ||
| 266 | return 1; | ||
| 267 | } | ||
| 268 | |||
| 259 | void __init mcheck_intel_therm_init(void) | 269 | void __init mcheck_intel_therm_init(void) |
| 260 | { | 270 | { |
| 261 | /* | 271 | /* |
| @@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void) | |||
| 263 | * LVT value on BSP and use that value to restore APs' thermal LVT | 273 | * LVT value on BSP and use that value to restore APs' thermal LVT |
| 264 | * entry BIOS programmed later | 274 | * entry BIOS programmed later |
| 265 | */ | 275 | */ |
| 266 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && | 276 | if (intel_thermal_supported(&boot_cpu_data)) |
| 267 | cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) | ||
| 268 | lvtthmr_init = apic_read(APIC_LVTTHMR); | 277 | lvtthmr_init = apic_read(APIC_LVTTHMR); |
| 269 | } | 278 | } |
| 270 | 279 | ||
| @@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 274 | int tm2 = 0; | 283 | int tm2 = 0; |
| 275 | u32 l, h; | 284 | u32 l, h; |
| 276 | 285 | ||
| 277 | /* Thermal monitoring depends on ACPI and clock modulation*/ | 286 | if (!intel_thermal_supported(c)) |
| 278 | if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) | ||
| 279 | return; | 287 | return; |
| 280 | 288 | ||
| 281 | /* | 289 | /* |
| @@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 339 | l = apic_read(APIC_LVTTHMR); | 347 | l = apic_read(APIC_LVTTHMR); |
| 340 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 348 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
| 341 | 349 | ||
| 342 | printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", | 350 | printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n", |
| 343 | cpu, tm2 ? "TM2" : "TM1"); | 351 | tm2 ? "TM2" : "TM1"); |
| 344 | 352 | ||
| 345 | /* enable thermal throttle processing */ | 353 | /* enable thermal throttle processing */ |
| 346 | atomic_set(&therm_throt_en, 1); | 354 | atomic_set(&therm_throt_en, 1); |
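Folding the capability tests into intel_thermal_supported() keeps mcheck_intel_therm_init() and intel_init_thermal() applying the same criteria (the old init-time check omitted the APIC test that the new helper adds). The switch to printk_once() also reduces the per-CPU "Thermal monitoring enabled" line to a single boot message. Roughly how printk_once() behaves, as a simplified sketch rather than the exact kernel macro:

    #define printk_once(fmt, ...)                   \
    ({                                              \
            static bool __done;                     \
            if (!__done) {                          \
                    __done = true;                  \
                    printk(fmt, ##__VA_ARGS__);     \
            }                                       \
    })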
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index ab1a8a89b984..45506d5dd8df 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
| @@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) | |||
| 1632 | 1632 | ||
| 1633 | data.period = event->hw.last_period; | 1633 | data.period = event->hw.last_period; |
| 1634 | data.addr = 0; | 1634 | data.addr = 0; |
| 1635 | data.raw = NULL; | ||
| 1635 | regs.ip = 0; | 1636 | regs.ip = 0; |
| 1636 | 1637 | ||
| 1637 | /* | 1638 | /* |
| @@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) | |||
| 1749 | u64 val; | 1750 | u64 val; |
| 1750 | 1751 | ||
| 1751 | data.addr = 0; | 1752 | data.addr = 0; |
| 1753 | data.raw = NULL; | ||
| 1752 | 1754 | ||
| 1753 | cpuc = &__get_cpu_var(cpu_hw_events); | 1755 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1754 | 1756 | ||
| @@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
| 1794 | u64 ack, status; | 1796 | u64 ack, status; |
| 1795 | 1797 | ||
| 1796 | data.addr = 0; | 1798 | data.addr = 0; |
| 1799 | data.raw = NULL; | ||
| 1797 | 1800 | ||
| 1798 | cpuc = &__get_cpu_var(cpu_hw_events); | 1801 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1799 | 1802 | ||
| @@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) | |||
| 1857 | u64 val; | 1860 | u64 val; |
| 1858 | 1861 | ||
| 1859 | data.addr = 0; | 1862 | data.addr = 0; |
| 1863 | data.raw = NULL; | ||
| 1860 | 1864 | ||
| 1861 | cpuc = &__get_cpu_var(cpu_hw_events); | 1865 | cpuc = &__get_cpu_var(cpu_hw_events); |
| 1862 | 1866 | ||
| @@ -2062,12 +2066,6 @@ static __init int p6_pmu_init(void) | |||
| 2062 | 2066 | ||
| 2063 | x86_pmu = p6_pmu; | 2067 | x86_pmu = p6_pmu; |
| 2064 | 2068 | ||
| 2065 | if (!cpu_has_apic) { | ||
| 2066 | pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); | ||
| 2067 | pr_info("no hardware sampling interrupt available.\n"); | ||
| 2068 | x86_pmu.apic = 0; | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | return 0; | 2069 | return 0; |
| 2072 | } | 2070 | } |
| 2073 | 2071 | ||
| @@ -2159,6 +2157,16 @@ static __init int amd_pmu_init(void) | |||
| 2159 | return 0; | 2157 | return 0; |
| 2160 | } | 2158 | } |
| 2161 | 2159 | ||
| 2160 | static void __init pmu_check_apic(void) | ||
| 2161 | { | ||
| 2162 | if (cpu_has_apic) | ||
| 2163 | return; | ||
| 2164 | |||
| 2165 | x86_pmu.apic = 0; | ||
| 2166 | pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); | ||
| 2167 | pr_info("no hardware sampling interrupt available.\n"); | ||
| 2168 | } | ||
| 2169 | |||
| 2162 | void __init init_hw_perf_events(void) | 2170 | void __init init_hw_perf_events(void) |
| 2163 | { | 2171 | { |
| 2164 | int err; | 2172 | int err; |
| @@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void) | |||
| 2180 | return; | 2188 | return; |
| 2181 | } | 2189 | } |
| 2182 | 2190 | ||
| 2191 | pmu_check_apic(); | ||
| 2192 | |||
| 2183 | pr_cont("%s PMU driver.\n", x86_pmu.name); | 2193 | pr_cont("%s PMU driver.\n", x86_pmu.name); |
| 2184 | 2194 | ||
| 2185 | if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { | 2195 | if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { |
| @@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) | |||
| 2287 | 2297 | ||
| 2288 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | 2298 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); |
| 2289 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); | 2299 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); |
| 2290 | static DEFINE_PER_CPU(int, in_nmi_frame); | 2300 | static DEFINE_PER_CPU(int, in_ignored_frame); |
| 2291 | 2301 | ||
| 2292 | 2302 | ||
| 2293 | static void | 2303 | static void |
| @@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg) | |||
| 2303 | 2313 | ||
| 2304 | static int backtrace_stack(void *data, char *name) | 2314 | static int backtrace_stack(void *data, char *name) |
| 2305 | { | 2315 | { |
| 2306 | per_cpu(in_nmi_frame, smp_processor_id()) = | 2316 | per_cpu(in_ignored_frame, smp_processor_id()) = |
| 2307 | x86_is_stack_id(NMI_STACK, name); | 2317 | x86_is_stack_id(NMI_STACK, name) || |
| 2318 | x86_is_stack_id(DEBUG_STACK, name); | ||
| 2308 | 2319 | ||
| 2309 | return 0; | 2320 | return 0; |
| 2310 | } | 2321 | } |
| @@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
| 2313 | { | 2324 | { |
| 2314 | struct perf_callchain_entry *entry = data; | 2325 | struct perf_callchain_entry *entry = data; |
| 2315 | 2326 | ||
| 2316 | if (per_cpu(in_nmi_frame, smp_processor_id())) | 2327 | if (per_cpu(in_ignored_frame, smp_processor_id())) |
| 2317 | return; | 2328 | return; |
| 2318 | 2329 | ||
| 2319 | if (reliable) | 2330 | if (reliable) |
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index ef42a038f1a6..1c47390dd0e5 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
| @@ -265,13 +265,13 @@ struct ds_context { | |||
| 265 | int cpu; | 265 | int cpu; |
| 266 | }; | 266 | }; |
| 267 | 267 | ||
| 268 | static DEFINE_PER_CPU(struct ds_context *, cpu_context); | 268 | static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context); |
| 269 | 269 | ||
| 270 | 270 | ||
| 271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) | 271 | static struct ds_context *ds_get_context(struct task_struct *task, int cpu) |
| 272 | { | 272 | { |
| 273 | struct ds_context **p_context = | 273 | struct ds_context **p_context = |
| 274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu)); | 274 | (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu)); |
| 275 | struct ds_context *context = NULL; | 275 | struct ds_context *context = NULL; |
| 276 | struct ds_context *new_context = NULL; | 276 | struct ds_context *new_context = NULL; |
| 277 | 277 | ||
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 8e740934bd1f..b13af53883aa 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
| @@ -103,6 +103,35 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, | |||
| 103 | return NULL; | 103 | return NULL; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static inline int | ||
| 107 | in_irq_stack(unsigned long *stack, unsigned long *irq_stack, | ||
| 108 | unsigned long *irq_stack_end) | ||
| 109 | { | ||
| 110 | return (stack >= irq_stack && stack < irq_stack_end); | ||
| 111 | } | ||
| 112 | |||
| 113 | /* | ||
| 114 | * We are returning from the irq stack and go to the previous one. | ||
| 115 | * If the previous stack is also in the irq stack, then bp in the first | ||
| 116 | * frame of the irq stack points to the previous, interrupted one. | ||
| 117 | * Otherwise we have another level of indirection: We first save | ||
| 118 | * the bp of the previous stack, then we switch the stack to the irq one | ||
| 119 | * and save a new bp that links to the previous one. | ||
| 120 | * (See save_args()) | ||
| 121 | */ | ||
| 122 | static inline unsigned long | ||
| 123 | fixup_bp_irq_link(unsigned long bp, unsigned long *stack, | ||
| 124 | unsigned long *irq_stack, unsigned long *irq_stack_end) | ||
| 125 | { | ||
| 126 | #ifdef CONFIG_FRAME_POINTER | ||
| 127 | struct stack_frame *frame = (struct stack_frame *)bp; | ||
| 128 | |||
| 129 | if (!in_irq_stack(stack, irq_stack, irq_stack_end)) | ||
| 130 | return (unsigned long)frame->next_frame; | ||
| 131 | #endif | ||
| 132 | return bp; | ||
| 133 | } | ||
| 134 | |||
| 106 | /* | 135 | /* |
| 107 | * x86-64 can have up to three kernel stacks: | 136 | * x86-64 can have up to three kernel stacks: |
| 108 | * process stack | 137 | * process stack |
| @@ -175,7 +204,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 175 | irq_stack = irq_stack_end - | 204 | irq_stack = irq_stack_end - |
| 176 | (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); | 205 | (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack); |
| 177 | 206 | ||
| 178 | if (stack >= irq_stack && stack < irq_stack_end) { | 207 | if (in_irq_stack(stack, irq_stack, irq_stack_end)) { |
| 179 | if (ops->stack(data, "IRQ") < 0) | 208 | if (ops->stack(data, "IRQ") < 0) |
| 180 | break; | 209 | break; |
| 181 | bp = print_context_stack(tinfo, stack, bp, | 210 | bp = print_context_stack(tinfo, stack, bp, |
| @@ -186,6 +215,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 186 | * pointer (index -1 to end) in the IRQ stack: | 215 | * pointer (index -1 to end) in the IRQ stack: |
| 187 | */ | 216 | */ |
| 188 | stack = (unsigned long *) (irq_stack_end[-1]); | 217 | stack = (unsigned long *) (irq_stack_end[-1]); |
| 218 | bp = fixup_bp_irq_link(bp, stack, irq_stack, | ||
| 219 | irq_stack_end); | ||
| 189 | irq_stack_end = NULL; | 220 | irq_stack_end = NULL; |
| 190 | ops->stack(data, "EOI"); | 221 | ops->stack(data, "EOI"); |
| 191 | continue; | 222 | continue; |
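fixup_bp_irq_link() above handles the frame-pointer discontinuity when the unwinder leaves the IRQ stack: per the comment in the hunk, if the previous stack is not itself within the IRQ stack, bp points at a link frame built by save_args(), and one extra next_frame dereference recovers the interrupted stack's real bp. The sketch below restates that dereference, assuming the CONFIG_FRAME_POINTER frame layout:

    struct stack_frame {
            struct stack_frame *next_frame;
            unsigned long return_address;
    };

    /* follow the link frame once to land on the interrupted stack */
    bp = (unsigned long)((struct stack_frame *)bp)->next_frame;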
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d17d482a04f4..f50447d961c0 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
| @@ -732,7 +732,16 @@ struct early_res { | |||
| 732 | char overlap_ok; | 732 | char overlap_ok; |
| 733 | }; | 733 | }; |
| 734 | static struct early_res early_res[MAX_EARLY_RES] __initdata = { | 734 | static struct early_res early_res[MAX_EARLY_RES] __initdata = { |
| 735 | { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */ | 735 | { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */ |
| 736 | #ifdef CONFIG_X86_32 | ||
| 737 | /* | ||
| 738 | * But first pinch a few for the stack/trampoline stuff | ||
| 739 | * FIXME: Don't need the extra page at 4K, but need to fix | ||
| 740 | * trampoline before removing it. (see the GDT stuff) | ||
| 741 | */ | ||
| 742 | { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 }, | ||
| 743 | #endif | ||
| 744 | |||
| 736 | {} | 745 | {} |
| 737 | }; | 746 | }; |
| 738 | 747 | ||
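Adding the 32-bit trampoline page directly to the static early_res[] table replaces the separate reserve_trampoline_memory() calls deleted from head32.c and head64.c below, and both entries now spell out the fourth initializer, overlap_ok, explicitly. A hedged sketch of the struct shape; start/end/overlap_ok are visible in the hunk above, the name-field length is assumed for illustration:

    struct early_res {
            u64 start;
            u64 end;
            char name[16];       /* length assumed, for illustration */
            char overlap_ok;     /* nonzero: a later overlapping
                                  * reservation is tolerated */
    };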
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 63bca794c8f9..673f693fb451 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
| @@ -1076,10 +1076,10 @@ ENTRY(\sym) | |||
| 1076 | TRACE_IRQS_OFF | 1076 | TRACE_IRQS_OFF |
| 1077 | movq %rsp,%rdi /* pt_regs pointer */ | 1077 | movq %rsp,%rdi /* pt_regs pointer */ |
| 1078 | xorl %esi,%esi /* no error code */ | 1078 | xorl %esi,%esi /* no error code */ |
| 1079 | PER_CPU(init_tss, %rbp) | 1079 | PER_CPU(init_tss, %r12) |
| 1080 | subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) | 1080 | subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) |
| 1081 | call \do_sym | 1081 | call \do_sym |
| 1082 | addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp) | 1082 | addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) |
| 1083 | jmp paranoid_exit /* %ebx: no swapgs flag */ | 1083 | jmp paranoid_exit /* %ebx: no swapgs flag */ |
| 1084 | CFI_ENDPROC | 1084 | CFI_ENDPROC |
| 1085 | END(\sym) | 1085 | END(\sym) |
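Switching the scratch register from %rbp to %r12 in this IST macro matters with frame pointers enabled: %rbp anchors the frame-pointer chain, so clobbering it around the call \do_sym severs stack unwinding (which the dumpstack_64.c and perf callchain changes earlier in this diff depend on), while %r12 is an ordinary callee-saved register with no such role. Conceptually, using struct stack_frame as sketched after the dumpstack_64.c hunk above:

    /* frame-pointer unwinding follows the %rbp links; record() is a
     * hypothetical sink */
    while (bp) {
            record(bp->return_address);
            bp = bp->next_frame;   /* valid only while %rbp stayed a
                                    * frame pointer, never a scratch */
    }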
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 4f8e2507e8f3..5051b94c9069 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
| @@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void) | |||
| 29 | 29 | ||
| 30 | void __init i386_start_kernel(void) | 30 | void __init i386_start_kernel(void) |
| 31 | { | 31 | { |
| 32 | reserve_trampoline_memory(); | ||
| 33 | |||
| 34 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 32 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); |
| 35 | 33 | ||
| 36 | #ifdef CONFIG_BLK_DEV_INITRD | 34 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 0b06cd778fd9..b5a9896ca1e7 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
| 98 | { | 98 | { |
| 99 | copy_bootdata(__va(real_mode_data)); | 99 | copy_bootdata(__va(real_mode_data)); |
| 100 | 100 | ||
| 101 | reserve_trampoline_memory(); | ||
| 102 | |||
| 103 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); | 101 | reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); |
| 104 | 102 | ||
| 105 | #ifdef CONFIG_BLK_DEV_INITRD | 103 | #ifdef CONFIG_BLK_DEV_INITRD |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index d42f65ac4927..05d5fec64a94 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
| @@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, | |||
| 362 | return ret; | 362 | return ret; |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | if (bp->callback) | 365 | ret = arch_store_info(bp); |
| 366 | ret = arch_store_info(bp); | ||
| 367 | 366 | ||
| 368 | if (ret < 0) | 367 | if (ret < 0) |
| 369 | return ret; | 368 | return ret; |
| @@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
| 519 | break; | 518 | break; |
| 520 | } | 519 | } |
| 521 | 520 | ||
| 522 | (bp->callback)(bp, args->regs); | 521 | perf_bp_event(bp, args->regs); |
| 523 | 522 | ||
| 524 | rcu_read_unlock(); | 523 | rcu_read_unlock(); |
| 525 | } | 524 | } |
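Routing breakpoint hits through perf_bp_event() instead of invoking bp->callback directly ties hardware breakpoints into the regular perf event delivery path, and arch_store_info() is now run unconditionally rather than only when a callback was attached. Sketch of the handler's tail as the hunk leaves it:

    rcu_read_lock();
    /* ... locate 'bp' for the triggered debug register ... */
    perf_bp_event(bp, args->regs);   /* perf core dispatches to consumers */
    rcu_read_unlock();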
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 20a5b3689463..dd74fe7273b1 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
| @@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
| 86 | gdb_regs[GDB_DS] = regs->ds; | 86 | gdb_regs[GDB_DS] = regs->ds; |
| 87 | gdb_regs[GDB_ES] = regs->es; | 87 | gdb_regs[GDB_ES] = regs->es; |
| 88 | gdb_regs[GDB_CS] = regs->cs; | 88 | gdb_regs[GDB_CS] = regs->cs; |
| 89 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
| 90 | gdb_regs[GDB_FS] = 0xFFFF; | 89 | gdb_regs[GDB_FS] = 0xFFFF; |
| 91 | gdb_regs[GDB_GS] = 0xFFFF; | 90 | gdb_regs[GDB_GS] = 0xFFFF; |
| 91 | if (user_mode_vm(regs)) { | ||
| 92 | gdb_regs[GDB_SS] = regs->ss; | ||
| 93 | gdb_regs[GDB_SP] = regs->sp; | ||
| 94 | } else { | ||
| 95 | gdb_regs[GDB_SS] = __KERNEL_DS; | ||
| 96 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); | ||
| 97 | } | ||
| 92 | #else | 98 | #else |
| 93 | gdb_regs[GDB_R8] = regs->r8; | 99 | gdb_regs[GDB_R8] = regs->r8; |
| 94 | gdb_regs[GDB_R9] = regs->r9; | 100 | gdb_regs[GDB_R9] = regs->r9; |
| @@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
| 101 | gdb_regs32[GDB_PS] = regs->flags; | 107 | gdb_regs32[GDB_PS] = regs->flags; |
| 102 | gdb_regs32[GDB_CS] = regs->cs; | 108 | gdb_regs32[GDB_CS] = regs->cs; |
| 103 | gdb_regs32[GDB_SS] = regs->ss; | 109 | gdb_regs32[GDB_SS] = regs->ss; |
| 104 | #endif | ||
| 105 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); | 110 | gdb_regs[GDB_SP] = kernel_stack_pointer(regs); |
| 111 | #endif | ||
| 106 | } | 112 | } |
| 107 | 113 | ||
| 108 | /** | 114 | /** |
| @@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void) | |||
| 220 | dr7 |= ((breakinfo[breakno].len << 2) | | 226 | dr7 |= ((breakinfo[breakno].len << 2) | |
| 221 | breakinfo[breakno].type) << | 227 | breakinfo[breakno].type) << |
| 222 | ((breakno << 2) + 16); | 228 | ((breakno << 2) + 16); |
| 223 | if (breakno >= 0 && breakno <= 3) | 229 | set_debugreg(breakinfo[breakno].addr, breakno); |
| 224 | set_debugreg(breakinfo[breakno].addr, breakno); | ||
| 225 | 230 | ||
| 226 | } else { | 231 | } else { |
| 227 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { | 232 | if ((dr7 & breakbit) && !breakinfo[breakno].enabled) { |
| @@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
| 395 | /* set the trace bit if we're stepping */ | 400 | /* set the trace bit if we're stepping */ |
| 396 | if (remcomInBuffer[0] == 's') { | 401 | if (remcomInBuffer[0] == 's') { |
| 397 | linux_regs->flags |= X86_EFLAGS_TF; | 402 | linux_regs->flags |= X86_EFLAGS_TF; |
| 398 | kgdb_single_step = 1; | ||
| 399 | atomic_set(&kgdb_cpu_doing_single_step, | 403 | atomic_set(&kgdb_cpu_doing_single_step, |
| 400 | raw_smp_processor_id()); | 404 | raw_smp_processor_id()); |
| 401 | } | 405 | } |
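On 32-bit, pt_regs carries valid SS and SP only for traps from user mode; for kernel-mode traps the stack pointer must be derived from the trap frame, which is what kernel_stack_pointer() does. The user_mode_vm() branch above therefore reports the hardware-saved user SS:SP when coming from userspace (including VM86) and the derived kernel values otherwise, instead of always faking __KERNEL_DS. A condensed sketch of the selection, assuming those 32-bit pt_regs semantics:

    if (user_mode_vm(regs)) {               /* user or VM86 trap */
            gdb_regs[GDB_SS] = regs->ss;    /* saved by hardware */
            gdb_regs[GDB_SP] = regs->sp;
    } else {                                /* kernel-mode trap */
            gdb_regs[GDB_SS] = __KERNEL_DS;
            gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
    }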
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 63123d902103..37542b67c57e 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
| @@ -13,6 +13,9 @@ | |||
| 13 | * Licensed under the terms of the GNU General Public | 13 | * Licensed under the terms of the GNU General Public |
| 14 | * License version 2. See file COPYING for details. | 14 | * License version 2. See file COPYING for details. |
| 15 | */ | 15 | */ |
| 16 | |||
| 17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 18 | |||
| 16 | #include <linux/firmware.h> | 19 | #include <linux/firmware.h> |
| 17 | #include <linux/pci_ids.h> | 20 | #include <linux/pci_ids.h> |
| 18 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
| @@ -81,7 +84,7 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
| 81 | 84 | ||
| 82 | memset(csig, 0, sizeof(*csig)); | 85 | memset(csig, 0, sizeof(*csig)); |
| 83 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); | 86 | rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy); |
| 84 | pr_info("microcode: CPU%d: patch_level=0x%x\n", cpu, csig->rev); | 87 | pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev); |
| 85 | return 0; | 88 | return 0; |
| 86 | } | 89 | } |
| 87 | 90 | ||
| @@ -111,8 +114,8 @@ static int get_matching_microcode(int cpu, void *mc, int rev) | |||
| 111 | 114 | ||
| 112 | /* ucode might be chipset specific -- currently we don't support this */ | 115 | /* ucode might be chipset specific -- currently we don't support this */ |
| 113 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { | 116 | if (mc_header->nb_dev_id || mc_header->sb_dev_id) { |
| 114 | pr_err(KERN_ERR "microcode: CPU%d: loading of chipset " | 117 | pr_err("CPU%d: loading of chipset specific code not yet supported\n", |
| 115 | "specific code not yet supported\n", cpu); | 118 | cpu); |
| 116 | return 0; | 119 | return 0; |
| 117 | } | 120 | } |
| 118 | 121 | ||
| @@ -141,12 +144,12 @@ static int apply_microcode_amd(int cpu) | |||
| 141 | 144 | ||
| 142 | /* check current patch id and patch's id for match */ | 145 | /* check current patch id and patch's id for match */ |
| 143 | if (rev != mc_amd->hdr.patch_id) { | 146 | if (rev != mc_amd->hdr.patch_id) { |
| 144 | pr_err("microcode: CPU%d: update failed " | 147 | pr_err("CPU%d: update failed (for patch_level=0x%x)\n", |
| 145 | "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id); | 148 | cpu, mc_amd->hdr.patch_id); |
| 146 | return -1; | 149 | return -1; |
| 147 | } | 150 | } |
| 148 | 151 | ||
| 149 | pr_info("microcode: CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); | 152 | pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); |
| 150 | uci->cpu_sig.rev = rev; | 153 | uci->cpu_sig.rev = rev; |
| 151 | 154 | ||
| 152 | return 0; | 155 | return 0; |
| @@ -169,15 +172,14 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) | |||
| 169 | return NULL; | 172 | return NULL; |
| 170 | 173 | ||
| 171 | if (section_hdr[0] != UCODE_UCODE_TYPE) { | 174 | if (section_hdr[0] != UCODE_UCODE_TYPE) { |
| 172 | pr_err("microcode: error: invalid type field in " | 175 | pr_err("error: invalid type field in container file section header\n"); |
| 173 | "container file section header\n"); | ||
| 174 | return NULL; | 176 | return NULL; |
| 175 | } | 177 | } |
| 176 | 178 | ||
| 177 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); | 179 | total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); |
| 178 | 180 | ||
| 179 | if (total_size > size || total_size > UCODE_MAX_SIZE) { | 181 | if (total_size > size || total_size > UCODE_MAX_SIZE) { |
| 180 | pr_err("microcode: error: size mismatch\n"); | 182 | pr_err("error: size mismatch\n"); |
| 181 | return NULL; | 183 | return NULL; |
| 182 | } | 184 | } |
| 183 | 185 | ||
| @@ -206,14 +208,13 @@ static int install_equiv_cpu_table(const u8 *buf) | |||
| 206 | size = buf_pos[2]; | 208 | size = buf_pos[2]; |
| 207 | 209 | ||
| 208 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { | 210 | if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
| 209 | pr_err("microcode: error: invalid type field in " | 211 | pr_err("error: invalid type field in container file section header\n"); |
| 210 | "container file section header\n"); | ||
| 211 | return 0; | 212 | return 0; |
| 212 | } | 213 | } |
| 213 | 214 | ||
| 214 | equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); | 215 | equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); |
| 215 | if (!equiv_cpu_table) { | 216 | if (!equiv_cpu_table) { |
| 216 | pr_err("microcode: failed to allocate equivalent CPU table\n"); | 217 | pr_err("failed to allocate equivalent CPU table\n"); |
| 217 | return 0; | 218 | return 0; |
| 218 | } | 219 | } |
| 219 | 220 | ||
| @@ -246,7 +247,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
| 246 | 247 | ||
| 247 | offset = install_equiv_cpu_table(ucode_ptr); | 248 | offset = install_equiv_cpu_table(ucode_ptr); |
| 248 | if (!offset) { | 249 | if (!offset) { |
| 249 | pr_err("microcode: failed to create equivalent cpu table\n"); | 250 | pr_err("failed to create equivalent cpu table\n"); |
| 250 | return UCODE_ERROR; | 251 | return UCODE_ERROR; |
| 251 | } | 252 | } |
| 252 | 253 | ||
| @@ -277,8 +278,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) | |||
| 277 | if (!leftover) { | 278 | if (!leftover) { |
| 278 | vfree(uci->mc); | 279 | vfree(uci->mc); |
| 279 | uci->mc = new_mc; | 280 | uci->mc = new_mc; |
| 280 | pr_debug("microcode: CPU%d found a matching microcode " | 281 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
| 281 | "update with version 0x%x (current=0x%x)\n", | ||
| 282 | cpu, new_rev, uci->cpu_sig.rev); | 282 | cpu, new_rev, uci->cpu_sig.rev); |
| 283 | } else { | 283 | } else { |
| 284 | vfree(new_mc); | 284 | vfree(new_mc); |
| @@ -300,7 +300,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
| 300 | return UCODE_NFOUND; | 300 | return UCODE_NFOUND; |
| 301 | 301 | ||
| 302 | if (*(u32 *)firmware->data != UCODE_MAGIC) { | 302 | if (*(u32 *)firmware->data != UCODE_MAGIC) { |
| 303 | pr_err("microcode: invalid UCODE_MAGIC (0x%08x)\n", | 303 | pr_err("invalid UCODE_MAGIC (0x%08x)\n", |
| 304 | *(u32 *)firmware->data); | 304 | *(u32 *)firmware->data); |
| 305 | return UCODE_ERROR; | 305 | return UCODE_ERROR; |
| 306 | } | 306 | } |
| @@ -313,8 +313,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
| 313 | static enum ucode_state | 313 | static enum ucode_state |
| 314 | request_microcode_user(int cpu, const void __user *buf, size_t size) | 314 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
| 315 | { | 315 | { |
| 316 | pr_info("microcode: AMD microcode update via " | 316 | pr_info("AMD microcode update via /dev/cpu/microcode not supported\n"); |
| 317 | "/dev/cpu/microcode not supported\n"); | ||
| 318 | return UCODE_ERROR; | 317 | return UCODE_ERROR; |
| 319 | } | 318 | } |
| 320 | 319 | ||
| @@ -334,14 +333,13 @@ void init_microcode_amd(struct device *device) | |||
| 334 | WARN_ON(c->x86_vendor != X86_VENDOR_AMD); | 333 | WARN_ON(c->x86_vendor != X86_VENDOR_AMD); |
| 335 | 334 | ||
| 336 | if (c->x86 < 0x10) { | 335 | if (c->x86 < 0x10) { |
| 337 | pr_warning("microcode: AMD CPU family 0x%x not supported\n", | 336 | pr_warning("AMD CPU family 0x%x not supported\n", c->x86); |
| 338 | c->x86); | ||
| 339 | return; | 337 | return; |
| 340 | } | 338 | } |
| 341 | supported_cpu = 1; | 339 | supported_cpu = 1; |
| 342 | 340 | ||
| 343 | if (request_firmware(&firmware, fw_name, device)) | 341 | if (request_firmware(&firmware, fw_name, device)) |
| 344 | pr_err("microcode: failed to load file %s\n", fw_name); | 342 | pr_err("failed to load file %s\n", fw_name); |
| 345 | } | 343 | } |
| 346 | 344 | ||
| 347 | void fini_microcode_amd(void) | 345 | void fini_microcode_amd(void) |
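The microcode_amd.c conversion above (mirrored in microcode_core.c and microcode_intel.c below) defines pr_fmt before any includes, so every pr_err/pr_info/pr_debug call picks up the "microcode: " prefix from KBUILD_MODNAME automatically and the hand-written prefixes and string splits can be dropped. A self-contained sketch of the mechanism, with a hypothetical module:

    /* must precede the printk.h include chain, which only supplies a
     * default pr_fmt when none is defined */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
            pr_info("loaded\n");   /* prints "<modname>: loaded" */
            return 0;
    }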
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index e68aae397869..844c02c65fcb 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
| @@ -70,6 +70,9 @@ | |||
| 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
| 71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
| 72 | */ | 72 | */ |
| 73 | |||
| 74 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 75 | |||
| 73 | #include <linux/platform_device.h> | 76 | #include <linux/platform_device.h> |
| 74 | #include <linux/miscdevice.h> | 77 | #include <linux/miscdevice.h> |
| 75 | #include <linux/capability.h> | 78 | #include <linux/capability.h> |
| @@ -209,7 +212,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, | |||
| 209 | ssize_t ret = -EINVAL; | 212 | ssize_t ret = -EINVAL; |
| 210 | 213 | ||
| 211 | if ((len >> PAGE_SHIFT) > totalram_pages) { | 214 | if ((len >> PAGE_SHIFT) > totalram_pages) { |
| 212 | pr_err("microcode: too much data (max %ld pages)\n", totalram_pages); | 215 | pr_err("too much data (max %ld pages)\n", totalram_pages); |
| 213 | return ret; | 216 | return ret; |
| 214 | } | 217 | } |
| 215 | 218 | ||
| @@ -244,7 +247,7 @@ static int __init microcode_dev_init(void) | |||
| 244 | 247 | ||
| 245 | error = misc_register(µcode_dev); | 248 | error = misc_register(µcode_dev); |
| 246 | if (error) { | 249 | if (error) { |
| 247 | pr_err("microcode: can't misc_register on minor=%d\n", MICROCODE_MINOR); | 250 | pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR); |
| 248 | return error; | 251 | return error; |
| 249 | } | 252 | } |
| 250 | 253 | ||
| @@ -359,7 +362,7 @@ static enum ucode_state microcode_resume_cpu(int cpu) | |||
| 359 | if (!uci->mc) | 362 | if (!uci->mc) |
| 360 | return UCODE_NFOUND; | 363 | return UCODE_NFOUND; |
| 361 | 364 | ||
| 362 | pr_debug("microcode: CPU%d updated upon resume\n", cpu); | 365 | pr_debug("CPU%d updated upon resume\n", cpu); |
| 363 | apply_microcode_on_target(cpu); | 366 | apply_microcode_on_target(cpu); |
| 364 | 367 | ||
| 365 | return UCODE_OK; | 368 | return UCODE_OK; |
| @@ -379,7 +382,7 @@ static enum ucode_state microcode_init_cpu(int cpu) | |||
| 379 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); | 382 | ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); |
| 380 | 383 | ||
| 381 | if (ustate == UCODE_OK) { | 384 | if (ustate == UCODE_OK) { |
| 382 | pr_debug("microcode: CPU%d updated upon init\n", cpu); | 385 | pr_debug("CPU%d updated upon init\n", cpu); |
| 383 | apply_microcode_on_target(cpu); | 386 | apply_microcode_on_target(cpu); |
| 384 | } | 387 | } |
| 385 | 388 | ||
| @@ -406,7 +409,7 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
| 406 | if (!cpu_online(cpu)) | 409 | if (!cpu_online(cpu)) |
| 407 | return 0; | 410 | return 0; |
| 408 | 411 | ||
| 409 | pr_debug("microcode: CPU%d added\n", cpu); | 412 | pr_debug("CPU%d added\n", cpu); |
| 410 | 413 | ||
| 411 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | 414 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); |
| 412 | if (err) | 415 | if (err) |
| @@ -425,7 +428,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev) | |||
| 425 | if (!cpu_online(cpu)) | 428 | if (!cpu_online(cpu)) |
| 426 | return 0; | 429 | return 0; |
| 427 | 430 | ||
| 428 | pr_debug("microcode: CPU%d removed\n", cpu); | 431 | pr_debug("CPU%d removed\n", cpu); |
| 429 | microcode_fini_cpu(cpu); | 432 | microcode_fini_cpu(cpu); |
| 430 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | 433 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
| 431 | return 0; | 434 | return 0; |
| @@ -473,15 +476,15 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
| 473 | microcode_update_cpu(cpu); | 476 | microcode_update_cpu(cpu); |
| 474 | case CPU_DOWN_FAILED: | 477 | case CPU_DOWN_FAILED: |
| 475 | case CPU_DOWN_FAILED_FROZEN: | 478 | case CPU_DOWN_FAILED_FROZEN: |
| 476 | pr_debug("microcode: CPU%d added\n", cpu); | 479 | pr_debug("CPU%d added\n", cpu); |
| 477 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) | 480 | if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) |
| 478 | pr_err("microcode: Failed to create group for CPU%d\n", cpu); | 481 | pr_err("Failed to create group for CPU%d\n", cpu); |
| 479 | break; | 482 | break; |
| 480 | case CPU_DOWN_PREPARE: | 483 | case CPU_DOWN_PREPARE: |
| 481 | case CPU_DOWN_PREPARE_FROZEN: | 484 | case CPU_DOWN_PREPARE_FROZEN: |
| 482 | /* Suspend is in progress, only remove the interface */ | 485 | /* Suspend is in progress, only remove the interface */ |
| 483 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | 486 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); |
| 484 | pr_debug("microcode: CPU%d removed\n", cpu); | 487 | pr_debug("CPU%d removed\n", cpu); |
| 485 | break; | 488 | break; |
| 486 | case CPU_DEAD: | 489 | case CPU_DEAD: |
| 487 | case CPU_UP_CANCELED_FROZEN: | 490 | case CPU_UP_CANCELED_FROZEN: |
| @@ -507,7 +510,7 @@ static int __init microcode_init(void) | |||
| 507 | microcode_ops = init_amd_microcode(); | 510 | microcode_ops = init_amd_microcode(); |
| 508 | 511 | ||
| 509 | if (!microcode_ops) { | 512 | if (!microcode_ops) { |
| 510 | pr_err("microcode: no support for this CPU vendor\n"); | 513 | pr_err("no support for this CPU vendor\n"); |
| 511 | return -ENODEV; | 514 | return -ENODEV; |
| 512 | } | 515 | } |
| 513 | 516 | ||
| @@ -541,8 +544,7 @@ static int __init microcode_init(void) | |||
| 541 | register_hotcpu_notifier(&mc_cpu_notifier); | 544 | register_hotcpu_notifier(&mc_cpu_notifier); |
| 542 | 545 | ||
| 543 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION | 546 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION |
| 544 | " <tigran@aivazian.fsnet.co.uk>," | 547 | " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); |
| 545 | " Peter Oruba\n"); | ||
| 546 | 548 | ||
| 547 | return 0; | 549 | return 0; |
| 548 | } | 550 | } |
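The microcode_core.c hunks above all serve one pattern: the literal "microcode: " prefix is dropped from every message because the new pr_fmt() definition, placed before the includes, makes each pr_err()/pr_debug()/pr_info() in the file prepend it automatically. A minimal sketch of the mechanism (the example function is illustrative, not part of the patch):

```c
/*
 * pr_fmt() must be defined before <linux/kernel.h> is included,
 * because the pr_*() helpers expand it at each call site, roughly:
 *
 *     #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 *
 * With KBUILD_MODNAME expanding to "microcode", both calls below
 * print the same line; only the first repeats the prefix by hand.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example(int cpu)
{
	printk(KERN_ERR "microcode: CPU%d example\n", cpu);	/* old style */
	pr_err("CPU%d example\n", cpu);				/* new style */
}
```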
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 0d334ddd0a96..ebd193e476ca 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
| @@ -70,6 +70,9 @@ | |||
| 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
| 71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
| 72 | */ | 72 | */ |
| 73 | |||
| 74 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 75 | |||
| 73 | #include <linux/firmware.h> | 76 | #include <linux/firmware.h> |
| 74 | #include <linux/uaccess.h> | 77 | #include <linux/uaccess.h> |
| 75 | #include <linux/kernel.h> | 78 | #include <linux/kernel.h> |
| @@ -146,8 +149,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
| 146 | 149 | ||
| 147 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | 150 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
| 148 | cpu_has(c, X86_FEATURE_IA64)) { | 151 | cpu_has(c, X86_FEATURE_IA64)) { |
| 149 | printk(KERN_ERR "microcode: CPU%d not a capable Intel " | 152 | pr_err("CPU%d not a capable Intel processor\n", cpu_num); |
| 150 | "processor\n", cpu_num); | ||
| 151 | return -1; | 153 | return -1; |
| 152 | } | 154 | } |
| 153 | 155 | ||
| @@ -165,8 +167,8 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | |||
| 165 | /* get the current revision from MSR 0x8B */ | 167 | /* get the current revision from MSR 0x8B */ |
| 166 | rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); | 168 | rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); |
| 167 | 169 | ||
| 168 | printk(KERN_INFO "microcode: CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", | 170 | pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", |
| 169 | cpu_num, csig->sig, csig->pf, csig->rev); | 171 | cpu_num, csig->sig, csig->pf, csig->rev); |
| 170 | 172 | ||
| 171 | return 0; | 173 | return 0; |
| 172 | } | 174 | } |
| @@ -194,28 +196,24 @@ static int microcode_sanity_check(void *mc) | |||
| 194 | data_size = get_datasize(mc_header); | 196 | data_size = get_datasize(mc_header); |
| 195 | 197 | ||
| 196 | if (data_size + MC_HEADER_SIZE > total_size) { | 198 | if (data_size + MC_HEADER_SIZE > total_size) { |
| 197 | printk(KERN_ERR "microcode: error! " | 199 | pr_err("error! Bad data size in microcode data file\n"); |
| 198 | "Bad data size in microcode data file\n"); | ||
| 199 | return -EINVAL; | 200 | return -EINVAL; |
| 200 | } | 201 | } |
| 201 | 202 | ||
| 202 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { | 203 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { |
| 203 | printk(KERN_ERR "microcode: error! " | 204 | pr_err("error! Unknown microcode update format\n"); |
| 204 | "Unknown microcode update format\n"); | ||
| 205 | return -EINVAL; | 205 | return -EINVAL; |
| 206 | } | 206 | } |
| 207 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | 207 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); |
| 208 | if (ext_table_size) { | 208 | if (ext_table_size) { |
| 209 | if ((ext_table_size < EXT_HEADER_SIZE) | 209 | if ((ext_table_size < EXT_HEADER_SIZE) |
| 210 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { | 210 | || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { |
| 211 | printk(KERN_ERR "microcode: error! " | 211 | pr_err("error! Small exttable size in microcode data file\n"); |
| 212 | "Small exttable size in microcode data file\n"); | ||
| 213 | return -EINVAL; | 212 | return -EINVAL; |
| 214 | } | 213 | } |
| 215 | ext_header = mc + MC_HEADER_SIZE + data_size; | 214 | ext_header = mc + MC_HEADER_SIZE + data_size; |
| 216 | if (ext_table_size != exttable_size(ext_header)) { | 215 | if (ext_table_size != exttable_size(ext_header)) { |
| 217 | printk(KERN_ERR "microcode: error! " | 216 | pr_err("error! Bad exttable size in microcode data file\n"); |
| 218 | "Bad exttable size in microcode data file\n"); | ||
| 219 | return -EFAULT; | 217 | return -EFAULT; |
| 220 | } | 218 | } |
| 221 | ext_sigcount = ext_header->count; | 219 | ext_sigcount = ext_header->count; |
| @@ -230,8 +228,7 @@ static int microcode_sanity_check(void *mc) | |||
| 230 | while (i--) | 228 | while (i--) |
| 231 | ext_table_sum += ext_tablep[i]; | 229 | ext_table_sum += ext_tablep[i]; |
| 232 | if (ext_table_sum) { | 230 | if (ext_table_sum) { |
| 233 | printk(KERN_WARNING "microcode: aborting, " | 231 | pr_warning("aborting, bad extended signature table checksum\n"); |
| 234 | "bad extended signature table checksum\n"); | ||
| 235 | return -EINVAL; | 232 | return -EINVAL; |
| 236 | } | 233 | } |
| 237 | } | 234 | } |
| @@ -242,7 +239,7 @@ static int microcode_sanity_check(void *mc) | |||
| 242 | while (i--) | 239 | while (i--) |
| 243 | orig_sum += ((int *)mc)[i]; | 240 | orig_sum += ((int *)mc)[i]; |
| 244 | if (orig_sum) { | 241 | if (orig_sum) { |
| 245 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | 242 | pr_err("aborting, bad checksum\n"); |
| 246 | return -EINVAL; | 243 | return -EINVAL; |
| 247 | } | 244 | } |
| 248 | if (!ext_table_size) | 245 | if (!ext_table_size) |
| @@ -255,7 +252,7 @@ static int microcode_sanity_check(void *mc) | |||
| 255 | - (mc_header->sig + mc_header->pf + mc_header->cksum) | 252 | - (mc_header->sig + mc_header->pf + mc_header->cksum) |
| 256 | + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); | 253 | + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); |
| 257 | if (sum) { | 254 | if (sum) { |
| 258 | printk(KERN_ERR "microcode: aborting, bad checksum\n"); | 255 | pr_err("aborting, bad checksum\n"); |
| 259 | return -EINVAL; | 256 | return -EINVAL; |
| 260 | } | 257 | } |
| 261 | } | 258 | } |
| @@ -327,13 +324,11 @@ static int apply_microcode(int cpu) | |||
| 327 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | 324 | rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); |
| 328 | 325 | ||
| 329 | if (val[1] != mc_intel->hdr.rev) { | 326 | if (val[1] != mc_intel->hdr.rev) { |
| 330 | printk(KERN_ERR "microcode: CPU%d update " | 327 | pr_err("CPU%d update to revision 0x%x failed\n", |
| 331 | "to revision 0x%x failed\n", | 328 | cpu_num, mc_intel->hdr.rev); |
| 332 | cpu_num, mc_intel->hdr.rev); | ||
| 333 | return -1; | 329 | return -1; |
| 334 | } | 330 | } |
| 335 | printk(KERN_INFO "microcode: CPU%d updated to revision " | 331 | pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x \n", |
| 336 | "0x%x, date = %04x-%02x-%02x \n", | ||
| 337 | cpu_num, val[1], | 332 | cpu_num, val[1], |
| 338 | mc_intel->hdr.date & 0xffff, | 333 | mc_intel->hdr.date & 0xffff, |
| 339 | mc_intel->hdr.date >> 24, | 334 | mc_intel->hdr.date >> 24, |
| @@ -362,8 +357,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
| 362 | 357 | ||
| 363 | mc_size = get_totalsize(&mc_header); | 358 | mc_size = get_totalsize(&mc_header); |
| 364 | if (!mc_size || mc_size > leftover) { | 359 | if (!mc_size || mc_size > leftover) { |
| 365 | printk(KERN_ERR "microcode: error!" | 360 | pr_err("error! Bad data in microcode data file\n"); |
| 366 | "Bad data in microcode data file\n"); | ||
| 367 | break; | 361 | break; |
| 368 | } | 362 | } |
| 369 | 363 | ||
| @@ -405,9 +399,8 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
| 405 | vfree(uci->mc); | 399 | vfree(uci->mc); |
| 406 | uci->mc = (struct microcode_intel *)new_mc; | 400 | uci->mc = (struct microcode_intel *)new_mc; |
| 407 | 401 | ||
| 408 | pr_debug("microcode: CPU%d found a matching microcode update with" | 402 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
| 409 | " version 0x%x (current=0x%x)\n", | 403 | cpu, new_rev, uci->cpu_sig.rev); |
| 410 | cpu, new_rev, uci->cpu_sig.rev); | ||
| 411 | out: | 404 | out: |
| 412 | return state; | 405 | return state; |
| 413 | } | 406 | } |
| @@ -429,7 +422,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) | |||
| 429 | c->x86, c->x86_model, c->x86_mask); | 422 | c->x86, c->x86_model, c->x86_mask); |
| 430 | 423 | ||
| 431 | if (request_firmware(&firmware, name, device)) { | 424 | if (request_firmware(&firmware, name, device)) { |
| 432 | pr_debug("microcode: data file %s load failed\n", name); | 425 | pr_debug("data file %s load failed\n", name); |
| 433 | return UCODE_NFOUND; | 426 | return UCODE_NFOUND; |
| 434 | } | 427 | } |
| 435 | 428 | ||
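Beyond the same pr_fmt() conversion, these hunks rejoin format strings that had been split to satisfy the 80-column limit. The usual motivation is that a user-visible string kept on one line can be grepped for in the tree when it shows up in a bug report. Both spellings compile to identical bytes (a stand-alone illustration):

```c
#include <linux/kernel.h>

static void report(void)
{
	/* old: adjacent string literals, hard to grep for */
	pr_err("error! "
	       "Bad data size in microcode data file\n");

	/* new: one searchable line, identical output */
	pr_err("error! Bad data size in microcode data file\n");
}
```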
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 35a57c963df9..40b54ceb68b5 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
| @@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void) | |||
| 945 | { | 945 | { |
| 946 | if (enable_update_mptable && alloc_mptable) { | 946 | if (enable_update_mptable && alloc_mptable) { |
| 947 | u64 startt = 0; | 947 | u64 startt = 0; |
| 948 | #ifdef CONFIG_X86_TRAMPOLINE | ||
| 949 | startt = TRAMPOLINE_BASE; | ||
| 950 | #endif | ||
| 951 | mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); | 948 | mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4); |
| 952 | } | 949 | } |
| 953 | } | 950 | } |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index afcc58b69c7c..fcc2f2bfa39c 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
| @@ -120,11 +120,14 @@ static void __init dma32_free_bootmem(void) | |||
| 120 | 120 | ||
| 121 | void __init pci_iommu_alloc(void) | 121 | void __init pci_iommu_alloc(void) |
| 122 | { | 122 | { |
| 123 | int use_swiotlb; | ||
| 124 | |||
| 125 | use_swiotlb = pci_swiotlb_init(); | ||
| 123 | #ifdef CONFIG_X86_64 | 126 | #ifdef CONFIG_X86_64 |
| 124 | /* free the range so iommu could get some range less than 4G */ | 127 | /* free the range so iommu could get some range less than 4G */ |
| 125 | dma32_free_bootmem(); | 128 | dma32_free_bootmem(); |
| 126 | #endif | 129 | #endif |
| 127 | if (pci_swiotlb_init()) | 130 | if (use_swiotlb) |
| 128 | return; | 131 | return; |
| 129 | 132 | ||
| 130 | gart_iommu_hole_init(); | 133 | gart_iommu_hole_init(); |
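The reorder in pci_iommu_alloc() runs pci_swiotlb_init() first but only acts on its verdict after dma32_free_bootmem(), presumably so swiotlb can claim its bounce buffer while the reserved sub-4G pool still exists, and so the pool is released for the hardware-IOMMU paths in either case. The control flow, condensed from the hunk:

```c
void __init pci_iommu_alloc(void)
{
	/* decide (and, if chosen, allocate) swiotlb up front */
	int use_swiotlb = pci_swiotlb_init();

#ifdef CONFIG_X86_64
	dma32_free_bootmem();	/* freed whichever way the decision went */
#endif
	if (use_swiotlb)
		return;		/* hardware IOMMU setup not needed */

	gart_iommu_hole_init();
	/* ... remaining IOMMU probes ... */
}
```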
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index e6a0d402f171..56c0e730d3fe 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
| @@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void) | |||
| 710 | struct pci_dev *dev; | 710 | struct pci_dev *dev; |
| 711 | int i; | 711 | int i; |
| 712 | 712 | ||
| 713 | if (no_agp) | 713 | /* don't shutdown it if there is AGP installed */ |
| 714 | if (!no_agp) | ||
| 714 | return; | 715 | return; |
| 715 | 716 | ||
| 716 | for (i = 0; i < num_k8_northbridges; i++) { | 717 | for (i = 0; i < num_k8_northbridges; i++) { |
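Despite its size, this is a logic fix, not a cleanup: the old guard `if (no_agp) return;` bailed out precisely when the kernel's own GART setup owned the aperture, i.e. exactly when the shutdown work was needed. Restated with an affirmative flag for readability (agp_present is hypothetical shorthand for !no_agp, not a symbol from the patch):

```c
static bool agp_present;	/* illustrative alias for !no_agp */

static void gart_iommu_shutdown(void)
{
	/* leave the aperture alone if an AGP driver owns it */
	if (agp_present)
		return;

	/* ... disable GART translation on each K8 northbridge ... */
}
```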
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 5e2ba634ea15..7a7bd4e3ec49 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
| 11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
| 12 | #include <linux/user-return-notifier.h> | 12 | #include <linux/user-return-notifier.h> |
| 13 | #include <linux/dmi.h> | ||
| 14 | #include <linux/utsname.h> | ||
| 13 | #include <trace/events/power.h> | 15 | #include <trace/events/power.h> |
| 14 | #include <linux/hw_breakpoint.h> | 16 | #include <linux/hw_breakpoint.h> |
| 15 | #include <asm/system.h> | 17 | #include <asm/system.h> |
| @@ -90,6 +92,25 @@ void exit_thread(void) | |||
| 90 | } | 92 | } |
| 91 | } | 93 | } |
| 92 | 94 | ||
| 95 | void show_regs_common(void) | ||
| 96 | { | ||
| 97 | const char *board, *product; | ||
| 98 | |||
| 99 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
| 100 | if (!board) | ||
| 101 | board = ""; | ||
| 102 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
| 103 | if (!product) | ||
| 104 | product = ""; | ||
| 105 | |||
| 106 | printk("\n"); | ||
| 107 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", | ||
| 108 | current->pid, current->comm, print_tainted(), | ||
| 109 | init_utsname()->release, | ||
| 110 | (int)strcspn(init_utsname()->version, " "), | ||
| 111 | init_utsname()->version, board, product); | ||
| 112 | } | ||
| 113 | |||
| 93 | void flush_thread(void) | 114 | void flush_thread(void) |
| 94 | { | 115 | { |
| 95 | struct task_struct *tsk = current; | 116 | struct task_struct *tsk = current; |
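show_regs_common() centralizes the banner that process_32.c and process_64.c (below) used to build independently, including the NULL handling for DMI strings: dmi_get_system_info() returns NULL for fields the firmware left blank, so "" is substituted before printing. The defensive lookup, factored into a helper for illustration (dmi_or_empty() is not part of the patch):

```c
#include <linux/dmi.h>

/* fall back to "" so a printk "%s" never sees a NULL pointer */
static const char *dmi_or_empty(int field)
{
	const char *s = dmi_get_system_info(field);

	return s ? s : "";
}

/* e.g.: board = dmi_or_empty(DMI_BOARD_NAME); */
```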
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 075580b35682..120b88797a75 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
| 24 | #include <linux/user.h> | 24 | #include <linux/user.h> |
| 25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 26 | #include <linux/utsname.h> | ||
| 27 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
| 28 | #include <linux/reboot.h> | 27 | #include <linux/reboot.h> |
| 29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
| @@ -35,7 +34,6 @@ | |||
| 35 | #include <linux/tick.h> | 34 | #include <linux/tick.h> |
| 36 | #include <linux/percpu.h> | 35 | #include <linux/percpu.h> |
| 37 | #include <linux/prctl.h> | 36 | #include <linux/prctl.h> |
| 38 | #include <linux/dmi.h> | ||
| 39 | #include <linux/ftrace.h> | 37 | #include <linux/ftrace.h> |
| 40 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
| 41 | #include <linux/io.h> | 39 | #include <linux/io.h> |
| @@ -128,7 +126,6 @@ void __show_regs(struct pt_regs *regs, int all) | |||
| 128 | unsigned long d0, d1, d2, d3, d6, d7; | 126 | unsigned long d0, d1, d2, d3, d6, d7; |
| 129 | unsigned long sp; | 127 | unsigned long sp; |
| 130 | unsigned short ss, gs; | 128 | unsigned short ss, gs; |
| 131 | const char *board; | ||
| 132 | 129 | ||
| 133 | if (user_mode_vm(regs)) { | 130 | if (user_mode_vm(regs)) { |
| 134 | sp = regs->sp; | 131 | sp = regs->sp; |
| @@ -140,16 +137,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
| 140 | savesegment(gs, gs); | 137 | savesegment(gs, gs); |
| 141 | } | 138 | } |
| 142 | 139 | ||
| 143 | printk("\n"); | 140 | show_regs_common(); |
| 144 | |||
| 145 | board = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
| 146 | if (!board) | ||
| 147 | board = ""; | ||
| 148 | printk("Pid: %d, comm: %s %s (%s %.*s) %s\n", | ||
| 149 | task_pid_nr(current), current->comm, | ||
| 150 | print_tainted(), init_utsname()->release, | ||
| 151 | (int)strcspn(init_utsname()->version, " "), | ||
| 152 | init_utsname()->version, board); | ||
| 153 | 141 | ||
| 154 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", | 142 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", |
| 155 | (u16)regs->cs, regs->ip, regs->flags, | 143 | (u16)regs->cs, regs->ip, regs->flags, |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index c95c8f4e790a..e5ab0cd0ef36 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include <linux/user.h> | 27 | #include <linux/user.h> |
| 28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
| 29 | #include <linux/utsname.h> | ||
| 30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
| 31 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 32 | #include <linux/ptrace.h> | 31 | #include <linux/ptrace.h> |
| @@ -38,7 +37,6 @@ | |||
| 38 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
| 39 | #include <linux/io.h> | 38 | #include <linux/io.h> |
| 40 | #include <linux/ftrace.h> | 39 | #include <linux/ftrace.h> |
| 41 | #include <linux/dmi.h> | ||
| 42 | 40 | ||
| 43 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
| 44 | #include <asm/system.h> | 42 | #include <asm/system.h> |
| @@ -163,18 +161,8 @@ void __show_regs(struct pt_regs *regs, int all) | |||
| 163 | unsigned long d0, d1, d2, d3, d6, d7; | 161 | unsigned long d0, d1, d2, d3, d6, d7; |
| 164 | unsigned int fsindex, gsindex; | 162 | unsigned int fsindex, gsindex; |
| 165 | unsigned int ds, cs, es; | 163 | unsigned int ds, cs, es; |
| 166 | const char *board; | 164 | |
| 167 | 165 | show_regs_common(); | |
| 168 | printk("\n"); | ||
| 169 | print_modules(); | ||
| 170 | board = dmi_get_system_info(DMI_PRODUCT_NAME); | ||
| 171 | if (!board) | ||
| 172 | board = ""; | ||
| 173 | printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n", | ||
| 174 | current->pid, current->comm, print_tainted(), | ||
| 175 | init_utsname()->release, | ||
| 176 | (int)strcspn(init_utsname()->version, " "), | ||
| 177 | init_utsname()->version, board); | ||
| 178 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); | 166 | printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); |
| 179 | printk_address(regs->ip, 1); | 167 | printk_address(regs->ip, 1); |
| 180 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, | 168 | printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 04d182a7cfdb..7079ddaf0731 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
| @@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target, | |||
| 555 | return ret; | 555 | return ret; |
| 556 | } | 556 | } |
| 557 | 557 | ||
| 558 | static void ptrace_triggered(struct perf_event *bp, void *data) | 558 | static void ptrace_triggered(struct perf_event *bp, int nmi, |
| 559 | struct perf_sample_data *data, | ||
| 560 | struct pt_regs *regs) | ||
| 559 | { | 561 | { |
| 560 | int i; | 562 | int i; |
| 561 | struct thread_struct *thread = &(current->thread); | 563 | struct thread_struct *thread = &(current->thread); |
| @@ -593,13 +595,13 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) | |||
| 593 | return dr7; | 595 | return dr7; |
| 594 | } | 596 | } |
| 595 | 597 | ||
| 596 | static struct perf_event * | 598 | static int |
| 597 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | 599 | ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, |
| 598 | struct task_struct *tsk, int disabled) | 600 | struct task_struct *tsk, int disabled) |
| 599 | { | 601 | { |
| 600 | int err; | 602 | int err; |
| 601 | int gen_len, gen_type; | 603 | int gen_len, gen_type; |
| 602 | DEFINE_BREAKPOINT_ATTR(attr); | 604 | struct perf_event_attr attr; |
| 603 | 605 | ||
| 604 | /* | 606 | /* |
| 605 | * We shoud have at least an inactive breakpoint at this | 607 | * We shoud have at least an inactive breakpoint at this |
| @@ -607,18 +609,18 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, | |||
| 607 | * written the address register first | 609 | * written the address register first |
| 608 | */ | 610 | */ |
| 609 | if (!bp) | 611 | if (!bp) |
| 610 | return ERR_PTR(-EINVAL); | 612 | return -EINVAL; |
| 611 | 613 | ||
| 612 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); | 614 | err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); |
| 613 | if (err) | 615 | if (err) |
| 614 | return ERR_PTR(err); | 616 | return err; |
| 615 | 617 | ||
| 616 | attr = bp->attr; | 618 | attr = bp->attr; |
| 617 | attr.bp_len = gen_len; | 619 | attr.bp_len = gen_len; |
| 618 | attr.bp_type = gen_type; | 620 | attr.bp_type = gen_type; |
| 619 | attr.disabled = disabled; | 621 | attr.disabled = disabled; |
| 620 | 622 | ||
| 621 | return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); | 623 | return modify_user_hw_breakpoint(bp, &attr); |
| 622 | } | 624 | } |
| 623 | 625 | ||
| 624 | /* | 626 | /* |
| @@ -656,28 +658,17 @@ restore: | |||
| 656 | if (!second_pass) | 658 | if (!second_pass) |
| 657 | continue; | 659 | continue; |
| 658 | 660 | ||
| 659 | thread->ptrace_bps[i] = NULL; | 661 | rc = ptrace_modify_breakpoint(bp, len, type, |
| 660 | bp = ptrace_modify_breakpoint(bp, len, type, | ||
| 661 | tsk, 1); | 662 | tsk, 1); |
| 662 | if (IS_ERR(bp)) { | 663 | if (rc) |
| 663 | rc = PTR_ERR(bp); | ||
| 664 | thread->ptrace_bps[i] = NULL; | ||
| 665 | break; | 664 | break; |
| 666 | } | ||
| 667 | thread->ptrace_bps[i] = bp; | ||
| 668 | } | 665 | } |
| 669 | continue; | 666 | continue; |
| 670 | } | 667 | } |
| 671 | 668 | ||
| 672 | bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); | 669 | rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); |
| 673 | 670 | if (rc) | |
| 674 | /* Incorrect bp, or we have a bug in bp API */ | ||
| 675 | if (IS_ERR(bp)) { | ||
| 676 | rc = PTR_ERR(bp); | ||
| 677 | thread->ptrace_bps[i] = NULL; | ||
| 678 | break; | 671 | break; |
| 679 | } | ||
| 680 | thread->ptrace_bps[i] = bp; | ||
| 681 | } | 672 | } |
| 682 | /* | 673 | /* |
| 683 | * Make a second pass to free the remaining unused breakpoints | 674 | * Make a second pass to free the remaining unused breakpoints |
| @@ -721,9 +712,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
| 721 | { | 712 | { |
| 722 | struct perf_event *bp; | 713 | struct perf_event *bp; |
| 723 | struct thread_struct *t = &tsk->thread; | 714 | struct thread_struct *t = &tsk->thread; |
| 724 | DEFINE_BREAKPOINT_ATTR(attr); | 715 | struct perf_event_attr attr; |
| 725 | 716 | ||
| 726 | if (!t->ptrace_bps[nr]) { | 717 | if (!t->ptrace_bps[nr]) { |
| 718 | hw_breakpoint_init(&attr); | ||
| 727 | /* | 719 | /* |
| 728 | * Put stub len and type to register (reserve) an inactive but | 720 | * Put stub len and type to register (reserve) an inactive but |
| 729 | * correct bp | 721 | * correct bp |
| @@ -734,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, | |||
| 734 | attr.disabled = 1; | 726 | attr.disabled = 1; |
| 735 | 727 | ||
| 736 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); | 728 | bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); |
| 729 | |||
| 730 | /* | ||
| 731 | * CHECKME: the previous code returned -EIO if the addr wasn't | ||
| 732 | * a valid task virtual addr. The new one will return -EINVAL in | ||
| 733 | * this case. | ||
| 734 | * -EINVAL may be what we want for in-kernel breakpoints users, | ||
| 735 | * but -EIO looks better for ptrace, since we refuse a register | ||
| 736 | * writing for the user. And anyway this is the previous | ||
| 737 | * behaviour. | ||
| 738 | */ | ||
| 739 | if (IS_ERR(bp)) | ||
| 740 | return PTR_ERR(bp); | ||
| 741 | |||
| 742 | t->ptrace_bps[nr] = bp; | ||
| 737 | } else { | 743 | } else { |
| 744 | int err; | ||
| 745 | |||
| 738 | bp = t->ptrace_bps[nr]; | 746 | bp = t->ptrace_bps[nr]; |
| 739 | t->ptrace_bps[nr] = NULL; | ||
| 740 | 747 | ||
| 741 | attr = bp->attr; | 748 | attr = bp->attr; |
| 742 | attr.bp_addr = addr; | 749 | attr.bp_addr = addr; |
| 743 | bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); | 750 | err = modify_user_hw_breakpoint(bp, &attr); |
| 751 | if (err) | ||
| 752 | return err; | ||
| 744 | } | 753 | } |
| 745 | /* | ||
| 746 | * CHECKME: the previous code returned -EIO if the addr wasn't a | ||
| 747 | * valid task virtual addr. The new one will return -EINVAL in this | ||
| 748 | * case. | ||
| 749 | * -EINVAL may be what we want for in-kernel breakpoints users, but | ||
| 750 | * -EIO looks better for ptrace, since we refuse a register writing | ||
| 751 | * for the user. And anyway this is the previous behaviour. | ||
| 752 | */ | ||
| 753 | if (IS_ERR(bp)) | ||
| 754 | return PTR_ERR(bp); | ||
| 755 | 754 | ||
| 756 | t->ptrace_bps[nr] = bp; | ||
| 757 | 755 | ||
| 758 | return 0; | 756 | return 0; |
| 759 | } | 757 | } |
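The ptrace.c churn tracks an API change in the hw-breakpoint layer: modify_user_hw_breakpoint() now edits the event in place and returns 0 or a negative errno, instead of handing back a (possibly different) perf_event pointer. The stored thread->ptrace_bps[i] pointer therefore stays valid across a modification, which is what allows the NULL-and-restore dance around each call to be deleted. The new calling convention in miniature (set_bp_addr() is an illustrative wrapper, not kernel code):

```c
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int set_bp_addr(struct perf_event *bp, unsigned long addr)
{
	struct perf_event_attr attr;

	if (!bp)			/* must have been registered first */
		return -EINVAL;

	attr = bp->attr;		/* copy, then edit, the attributes */
	attr.bp_addr = addr;

	/* 0 on success, -errno on failure; bp itself is never replaced */
	return modify_user_hw_breakpoint(bp, &attr);
}
```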
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 2b97fc5b124e..1545bc0c9845 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -259,6 +259,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
| 259 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), | 259 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), |
| 260 | }, | 260 | }, |
| 261 | }, | 261 | }, |
| 262 | { /* Handle problems with rebooting on ASUS P4S800 */ | ||
| 263 | .callback = set_bios_reboot, | ||
| 264 | .ident = "ASUS P4S800", | ||
| 265 | .matches = { | ||
| 266 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
| 267 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | ||
| 268 | }, | ||
| 269 | }, | ||
| 262 | { } | 270 | { } |
| 263 | }; | 271 | }; |
| 264 | 272 | ||
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 946a311a25c9..f7b8b9894b22 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
| @@ -73,6 +73,7 @@ | |||
| 73 | 73 | ||
| 74 | #include <asm/mtrr.h> | 74 | #include <asm/mtrr.h> |
| 75 | #include <asm/apic.h> | 75 | #include <asm/apic.h> |
| 76 | #include <asm/trampoline.h> | ||
| 76 | #include <asm/e820.h> | 77 | #include <asm/e820.h> |
| 77 | #include <asm/mpspec.h> | 78 | #include <asm/mpspec.h> |
| 78 | #include <asm/setup.h> | 79 | #include <asm/setup.h> |
| @@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p) | |||
| 875 | 876 | ||
| 876 | reserve_brk(); | 877 | reserve_brk(); |
| 877 | 878 | ||
| 879 | /* | ||
| 880 | * Find and reserve possible boot-time SMP configuration: | ||
| 881 | */ | ||
| 882 | find_smp_config(); | ||
| 883 | |||
| 884 | reserve_trampoline_memory(); | ||
| 885 | |||
| 878 | #ifdef CONFIG_ACPI_SLEEP | 886 | #ifdef CONFIG_ACPI_SLEEP |
| 879 | /* | 887 | /* |
| 880 | * Reserve low memory region for sleep support. | 888 | * Reserve low memory region for sleep support. |
| @@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 921 | 929 | ||
| 922 | early_acpi_boot_init(); | 930 | early_acpi_boot_init(); |
| 923 | 931 | ||
| 924 | /* | ||
| 925 | * Find and reserve possible boot-time SMP configuration: | ||
| 926 | */ | ||
| 927 | find_smp_config(); | ||
| 928 | |||
| 929 | #ifdef CONFIG_ACPI_NUMA | 932 | #ifdef CONFIG_ACPI_NUMA |
| 930 | /* | 933 | /* |
| 931 | * Parse SRAT to discover nodes. | 934 | * Parse SRAT to discover nodes. |
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d559af913e1f..35abcb8b00e9 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
| @@ -1,3 +1,5 @@ | |||
| 1 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 2 | |||
| 1 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
| 2 | #include <linux/module.h> | 4 | #include <linux/module.h> |
| 3 | #include <linux/init.h> | 5 | #include <linux/init.h> |
| @@ -20,9 +22,9 @@ | |||
| 20 | #include <asm/stackprotector.h> | 22 | #include <asm/stackprotector.h> |
| 21 | 23 | ||
| 22 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 24 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
| 23 | # define DBG(x...) printk(KERN_DEBUG x) | 25 | # define DBG(fmt, ...) pr_dbg(fmt, ##__VA_ARGS__) |
| 24 | #else | 26 | #else |
| 25 | # define DBG(x...) | 27 | # define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0) |
| 26 | #endif | 28 | #endif |
| 27 | 29 | ||
| 28 | DEFINE_PER_CPU(int, cpu_number); | 30 | DEFINE_PER_CPU(int, cpu_number); |
| @@ -116,8 +118,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | |||
| 116 | } else { | 118 | } else { |
| 117 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), | 119 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), |
| 118 | size, align, goal); | 120 | size, align, goal); |
| 119 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " | 121 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", |
| 120 | "%016lx\n", cpu, size, node, __pa(ptr)); | 122 | cpu, size, node, __pa(ptr)); |
| 121 | } | 123 | } |
| 122 | return ptr; | 124 | return ptr; |
| 123 | #else | 125 | #else |
| @@ -198,8 +200,7 @@ void __init setup_per_cpu_areas(void) | |||
| 198 | pcpu_cpu_distance, | 200 | pcpu_cpu_distance, |
| 199 | pcpu_fc_alloc, pcpu_fc_free); | 201 | pcpu_fc_alloc, pcpu_fc_free); |
| 200 | if (rc < 0) | 202 | if (rc < 0) |
| 201 | pr_warning("PERCPU: %s allocator failed (%d), " | 203 | pr_warning("%s allocator failed (%d), falling back to page size\n", |
| 202 | "falling back to page size\n", | ||
| 203 | pcpu_fc_names[pcpu_chosen_fc], rc); | 204 | pcpu_fc_names[pcpu_chosen_fc], rc); |
| 204 | } | 205 | } |
| 205 | if (rc < 0) | 206 | if (rc < 0) |
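The DBG() rewrite is a standard trick worth spelling out: the disabled variant is not empty but wraps the call in `if (0)`, so the compiler still type-checks the format string and its arguments in every configuration and then discards the call as dead code. A generic sketch of the idiom (note the hunk spells the helper pr_dbg; the kernel helper is pr_debug, which the sketch uses):

```c
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
# define DBG(fmt, ...)					\
	do {						\
		if (0)	/* type-checked, never emitted */ \
			pr_debug(fmt, ##__VA_ARGS__);	\
	} while (0)
#endif
```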
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 324f2a44c221..678d0b8c26f3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work) | |||
| 671 | complete(&c_idle->done); | 671 | complete(&c_idle->done); |
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | /* reduce the number of lines printed when booting a large cpu count system */ | ||
| 675 | static void __cpuinit announce_cpu(int cpu, int apicid) | ||
| 676 | { | ||
| 677 | static int current_node = -1; | ||
| 678 | int node = cpu_to_node(cpu); | ||
| 679 | |||
| 680 | if (system_state == SYSTEM_BOOTING) { | ||
| 681 | if (node != current_node) { | ||
| 682 | if (current_node > (-1)) | ||
| 683 | pr_cont(" Ok.\n"); | ||
| 684 | current_node = node; | ||
| 685 | pr_info("Booting Node %3d, Processors ", node); | ||
| 686 | } | ||
| 687 | pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : ""); | ||
| 688 | return; | ||
| 689 | } else | ||
| 690 | pr_info("Booting Node %d Processor %d APIC 0x%x\n", | ||
| 691 | node, cpu, apicid); | ||
| 692 | } | ||
| 693 | |||
| 674 | /* | 694 | /* |
| 675 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad | 695 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
| 676 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. | 696 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
| @@ -687,7 +707,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
| 687 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | 707 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
| 688 | }; | 708 | }; |
| 689 | 709 | ||
| 690 | INIT_WORK(&c_idle.work, do_fork_idle); | 710 | INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle); |
| 691 | 711 | ||
| 692 | alternatives_smp_switch(1); | 712 | alternatives_smp_switch(1); |
| 693 | 713 | ||
| @@ -713,6 +733,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
| 713 | 733 | ||
| 714 | if (IS_ERR(c_idle.idle)) { | 734 | if (IS_ERR(c_idle.idle)) { |
| 715 | printk("failed fork for CPU %d\n", cpu); | 735 | printk("failed fork for CPU %d\n", cpu); |
| 736 | destroy_work_on_stack(&c_idle.work); | ||
| 716 | return PTR_ERR(c_idle.idle); | 737 | return PTR_ERR(c_idle.idle); |
| 717 | } | 738 | } |
| 718 | 739 | ||
| @@ -736,9 +757,8 @@ do_rest: | |||
| 736 | /* start_ip had better be page-aligned! */ | 757 | /* start_ip had better be page-aligned! */ |
| 737 | start_ip = setup_trampoline(); | 758 | start_ip = setup_trampoline(); |
| 738 | 759 | ||
| 739 | /* So we see what's up */ | 760 | /* So we see what's up */ |
| 740 | printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n", | 761 | announce_cpu(cpu, apicid); |
| 741 | cpu, apicid, start_ip); | ||
| 742 | 762 | ||
| 743 | /* | 763 | /* |
| 744 | * This grunge runs the startup process for | 764 | * This grunge runs the startup process for |
| @@ -787,21 +807,17 @@ do_rest: | |||
| 787 | udelay(100); | 807 | udelay(100); |
| 788 | } | 808 | } |
| 789 | 809 | ||
| 790 | if (cpumask_test_cpu(cpu, cpu_callin_mask)) { | 810 | if (cpumask_test_cpu(cpu, cpu_callin_mask)) |
| 791 | /* number CPUs logically, starting from 1 (BSP is 0) */ | 811 | pr_debug("CPU%d: has booted.\n", cpu); |
| 792 | pr_debug("OK.\n"); | 812 | else { |
| 793 | printk(KERN_INFO "CPU%d: ", cpu); | ||
| 794 | print_cpu_info(&cpu_data(cpu)); | ||
| 795 | pr_debug("CPU has booted.\n"); | ||
| 796 | } else { | ||
| 797 | boot_error = 1; | 813 | boot_error = 1; |
| 798 | if (*((volatile unsigned char *)trampoline_base) | 814 | if (*((volatile unsigned char *)trampoline_base) |
| 799 | == 0xA5) | 815 | == 0xA5) |
| 800 | /* trampoline started but...? */ | 816 | /* trampoline started but...? */ |
| 801 | printk(KERN_ERR "Stuck ??\n"); | 817 | pr_err("CPU%d: Stuck ??\n", cpu); |
| 802 | else | 818 | else |
| 803 | /* trampoline code not run */ | 819 | /* trampoline code not run */ |
| 804 | printk(KERN_ERR "Not responding.\n"); | 820 | pr_err("CPU%d: Not responding.\n", cpu); |
| 805 | if (apic->inquire_remote_apic) | 821 | if (apic->inquire_remote_apic) |
| 806 | apic->inquire_remote_apic(apicid); | 822 | apic->inquire_remote_apic(apicid); |
| 807 | } | 823 | } |
| @@ -831,6 +847,7 @@ do_rest: | |||
| 831 | smpboot_restore_warm_reset_vector(); | 847 | smpboot_restore_warm_reset_vector(); |
| 832 | } | 848 | } |
| 833 | 849 | ||
| 850 | destroy_work_on_stack(&c_idle.work); | ||
| 834 | return boot_error; | 851 | return boot_error; |
| 835 | } | 852 | } |
| 836 | 853 | ||
| @@ -1291,14 +1308,16 @@ void native_cpu_die(unsigned int cpu) | |||
| 1291 | for (i = 0; i < 10; i++) { | 1308 | for (i = 0; i < 10; i++) { |
| 1292 | /* They ack this in play_dead by setting CPU_DEAD */ | 1309 | /* They ack this in play_dead by setting CPU_DEAD */ |
| 1293 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | 1310 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { |
| 1294 | printk(KERN_INFO "CPU %d is now offline\n", cpu); | 1311 | if (system_state == SYSTEM_RUNNING) |
| 1312 | pr_info("CPU %u is now offline\n", cpu); | ||
| 1313 | |||
| 1295 | if (1 == num_online_cpus()) | 1314 | if (1 == num_online_cpus()) |
| 1296 | alternatives_smp_switch(0); | 1315 | alternatives_smp_switch(0); |
| 1297 | return; | 1316 | return; |
| 1298 | } | 1317 | } |
| 1299 | msleep(100); | 1318 | msleep(100); |
| 1300 | } | 1319 | } |
| 1301 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | 1320 | pr_err("CPU %u didn't die...\n", cpu); |
| 1302 | } | 1321 | } |
| 1303 | 1322 | ||
| 1304 | void play_dead_common(void) | 1323 | void play_dead_common(void) |
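Two independent fixes ride along with the quieter boot output here. announce_cpu() collapses per-CPU boot chatter into one line per node, and the idle-fork work item, which lives on do_boot_cpu()'s stack, is now initialized with INIT_WORK_ON_STACK() and paired with destroy_work_on_stack() on both exit paths, as debugobjects/lockdep require for objects with on-stack lifetimes. The general pattern (self-contained sketch; do_something() is a placeholder):

```c
#include <linux/workqueue.h>

static void do_something(struct work_struct *work)
{
	/* runs in workqueue context */
}

static void run_stack_work(void)
{
	struct work_struct work;

	INIT_WORK_ON_STACK(&work, do_something);
	schedule_work(&work);
	flush_work(&work);		/* must finish before `work` dies */
	destroy_work_on_stack(&work);	/* needed on every exit path */
}
```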
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 1884a8d12bfa..dee1ff7cba58 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
| @@ -24,31 +24,6 @@ | |||
| 24 | 24 | ||
| 25 | #include <asm/syscalls.h> | 25 | #include <asm/syscalls.h> |
| 26 | 26 | ||
| 27 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
| 28 | unsigned long prot, unsigned long flags, | ||
| 29 | unsigned long fd, unsigned long pgoff) | ||
| 30 | { | ||
| 31 | int error = -EBADF; | ||
| 32 | struct file *file = NULL; | ||
| 33 | struct mm_struct *mm = current->mm; | ||
| 34 | |||
| 35 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
| 36 | if (!(flags & MAP_ANONYMOUS)) { | ||
| 37 | file = fget(fd); | ||
| 38 | if (!file) | ||
| 39 | goto out; | ||
| 40 | } | ||
| 41 | |||
| 42 | down_write(&mm->mmap_sem); | ||
| 43 | error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
| 44 | up_write(&mm->mmap_sem); | ||
| 45 | |||
| 46 | if (file) | ||
| 47 | fput(file); | ||
| 48 | out: | ||
| 49 | return error; | ||
| 50 | } | ||
| 51 | |||
| 52 | /* | 27 | /* |
| 53 | * Perform the select(nd, in, out, ex, tv) and mmap() system | 28 | * Perform the select(nd, in, out, ex, tv) and mmap() system |
| 54 | * calls. Linux/i386 didn't use to be able to handle more than | 29 | * calls. Linux/i386 didn't use to be able to handle more than |
| @@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | |||
| 77 | if (a.offset & ~PAGE_MASK) | 52 | if (a.offset & ~PAGE_MASK) |
| 78 | goto out; | 53 | goto out; |
| 79 | 54 | ||
| 80 | err = sys_mmap2(a.addr, a.len, a.prot, a.flags, | 55 | err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, |
| 81 | a.fd, a.offset >> PAGE_SHIFT); | 56 | a.fd, a.offset >> PAGE_SHIFT); |
| 82 | out: | 57 | out: |
| 83 | return err; | 58 | return err; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 45e00eb09c3a..8aa2057efd12 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
| @@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | |||
| 23 | unsigned long, fd, unsigned long, off) | 23 | unsigned long, fd, unsigned long, off) |
| 24 | { | 24 | { |
| 25 | long error; | 25 | long error; |
| 26 | struct file *file; | ||
| 27 | |||
| 28 | error = -EINVAL; | 26 | error = -EINVAL; |
| 29 | if (off & ~PAGE_MASK) | 27 | if (off & ~PAGE_MASK) |
| 30 | goto out; | 28 | goto out; |
| 31 | 29 | ||
| 32 | error = -EBADF; | 30 | error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); |
| 33 | file = NULL; | ||
| 34 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
| 35 | if (!(flags & MAP_ANONYMOUS)) { | ||
| 36 | file = fget(fd); | ||
| 37 | if (!file) | ||
| 38 | goto out; | ||
| 39 | } | ||
| 40 | down_write(&current->mm->mmap_sem); | ||
| 41 | error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); | ||
| 42 | up_write(¤t->mm->mmap_sem); | ||
| 43 | |||
| 44 | if (file) | ||
| 45 | fput(file); | ||
| 46 | out: | 31 | out: |
| 47 | return error; | 32 | return error; |
| 48 | } | 33 | } |
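Both arch wrappers (sys_i386_32.c above and this file) now collapse into calls to the common sys_mmap_pgoff(), which carries the boilerplate each of them open-coded: flag sanitizing, the fget()/fput() pairing for file-backed mappings, and the mmap_sem locking around do_mmap_pgoff(). Reassembled from the deleted bodies, the common helper looks roughly like this (the canonical copy lives in mm/, outside this arch/x86 diff):

```c
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long ret;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;
	}

	down_write(&current->mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
	return ret;
}
```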
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 70c2125d55b9..15228b5d3eb7 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
| @@ -191,7 +191,7 @@ ENTRY(sys_call_table) | |||
| 191 | .long sys_ni_syscall /* reserved for streams2 */ | 191 | .long sys_ni_syscall /* reserved for streams2 */ |
| 192 | .long ptregs_vfork /* 190 */ | 192 | .long ptregs_vfork /* 190 */ |
| 193 | .long sys_getrlimit | 193 | .long sys_getrlimit |
| 194 | .long sys_mmap2 | 194 | .long sys_mmap_pgoff |
| 195 | .long sys_truncate64 | 195 | .long sys_truncate64 |
| 196 | .long sys_ftruncate64 | 196 | .long sys_ftruncate64 |
| 197 | .long sys_stat64 /* 195 */ | 197 | .long sys_stat64 /* 195 */ |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index cd022121cab6..c652ef62742d 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
| @@ -12,21 +12,19 @@ | |||
| 12 | #endif | 12 | #endif |
| 13 | 13 | ||
| 14 | /* ready for x86_64 and x86 */ | 14 | /* ready for x86_64 and x86 */ |
| 15 | unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE); | 15 | unsigned char *__trampinitdata trampoline_base; |
| 16 | 16 | ||
| 17 | void __init reserve_trampoline_memory(void) | 17 | void __init reserve_trampoline_memory(void) |
| 18 | { | 18 | { |
| 19 | #ifdef CONFIG_X86_32 | 19 | unsigned long mem; |
| 20 | /* | 20 | |
| 21 | * But first pinch a few for the stack/trampoline stuff | ||
| 22 | * FIXME: Don't need the extra page at 4K, but need to fix | ||
| 23 | * trampoline before removing it. (see the GDT stuff) | ||
| 24 | */ | ||
| 25 | reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); | ||
| 26 | #endif | ||
| 27 | /* Has to be in very low memory so we can execute real-mode AP code. */ | 21 | /* Has to be in very low memory so we can execute real-mode AP code. */ |
| 28 | reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE, | 22 | mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); |
| 29 | "TRAMPOLINE"); | 23 | if (mem == -1L) |
| 24 | panic("Cannot allocate trampoline\n"); | ||
| 25 | |||
| 26 | trampoline_base = __va(mem); | ||
| 27 | reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); | ||
| 30 | } | 28 | } |
| 31 | 29 | ||
| 32 | /* | 30 | /* |
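This is the change the mpparse.c and setup.c hunks above orbit: the SMP trampoline loses its compile-time TRAMPOLINE_BASE and is instead carved out of the first megabyte at boot, which is why early_reserve_e820_mpc_new() could drop its CONFIG_X86_TRAMPOLINE special case, and presumably why setup_arch() now runs find_smp_config() before reserve_trampoline_memory(), so the MP-table region is claimed before the trampoline searches the same low memory. The allocation contract, annotated:

```c
/* search [0, 1M) for TRAMPOLINE_SIZE bytes at PAGE_SIZE alignment;
 * real-mode AP startup code can only execute from very low memory */
unsigned long mem = find_e820_area(0, 1 << 20, TRAMPOLINE_SIZE, PAGE_SIZE);

if (mem == -1L)				/* -1 is the "no free range" sentinel */
	panic("Cannot allocate trampoline\n");

trampoline_base = __va(mem);		/* kernel-virtual alias of the range */
reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
```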
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index fab7440c9bb2..296aba49472a 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | * Based on QEMU and Xen. | 29 | * Based on QEMU and Xen. |
| 30 | */ | 30 | */ |
| 31 | 31 | ||
| 32 | #define pr_fmt(fmt) "pit: " fmt | ||
| 33 | |||
| 32 | #include <linux/kvm_host.h> | 34 | #include <linux/kvm_host.h> |
| 33 | 35 | ||
| 34 | #include "irq.h" | 36 | #include "irq.h" |
| @@ -262,7 +264,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) | |||
| 262 | 264 | ||
| 263 | static void destroy_pit_timer(struct kvm_timer *pt) | 265 | static void destroy_pit_timer(struct kvm_timer *pt) |
| 264 | { | 266 | { |
| 265 | pr_debug("pit: execute del timer!\n"); | 267 | pr_debug("execute del timer!\n"); |
| 266 | hrtimer_cancel(&pt->timer); | 268 | hrtimer_cancel(&pt->timer); |
| 267 | } | 269 | } |
| 268 | 270 | ||
| @@ -284,7 +286,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) | |||
| 284 | 286 | ||
| 285 | interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); | 287 | interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); |
| 286 | 288 | ||
| 287 | pr_debug("pit: create pit timer, interval is %llu nsec\n", interval); | 289 | pr_debug("create pit timer, interval is %llu nsec\n", interval); |
| 288 | 290 | ||
| 289 | /* TODO The new value only affected after the retriggered */ | 291 | /* TODO The new value only affected after the retriggered */ |
| 290 | hrtimer_cancel(&pt->timer); | 292 | hrtimer_cancel(&pt->timer); |
| @@ -309,7 +311,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) | |||
| 309 | 311 | ||
| 310 | WARN_ON(!mutex_is_locked(&ps->lock)); | 312 | WARN_ON(!mutex_is_locked(&ps->lock)); |
| 311 | 313 | ||
| 312 | pr_debug("pit: load_count val is %d, channel is %d\n", val, channel); | 314 | pr_debug("load_count val is %d, channel is %d\n", val, channel); |
| 313 | 315 | ||
| 314 | /* | 316 | /* |
| 315 | * The largest possible initial count is 0; this is equivalent | 317 | * The largest possible initial count is 0; this is equivalent |
| @@ -395,8 +397,8 @@ static int pit_ioport_write(struct kvm_io_device *this, | |||
| 395 | mutex_lock(&pit_state->lock); | 397 | mutex_lock(&pit_state->lock); |
| 396 | 398 | ||
| 397 | if (val != 0) | 399 | if (val != 0) |
| 398 | pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n", | 400 | pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n", |
| 399 | (unsigned int)addr, len, val); | 401 | (unsigned int)addr, len, val); |
| 400 | 402 | ||
| 401 | if (addr == 3) { | 403 | if (addr == 3) { |
| 402 | channel = val >> 6; | 404 | channel = val >> 6; |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3de0b37ec038..1d9b33843c80 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage) | |||
| 316 | static int svm_hardware_enable(void *garbage) | 316 | static int svm_hardware_enable(void *garbage) |
| 317 | { | 317 | { |
| 318 | 318 | ||
| 319 | struct svm_cpu_data *svm_data; | 319 | struct svm_cpu_data *sd; |
| 320 | uint64_t efer; | 320 | uint64_t efer; |
| 321 | struct descriptor_table gdt_descr; | 321 | struct descriptor_table gdt_descr; |
| 322 | struct desc_struct *gdt; | 322 | struct desc_struct *gdt; |
| @@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage) | |||
| 331 | me); | 331 | me); |
| 332 | return -EINVAL; | 332 | return -EINVAL; |
| 333 | } | 333 | } |
| 334 | svm_data = per_cpu(svm_data, me); | 334 | sd = per_cpu(svm_data, me); |
| 335 | 335 | ||
| 336 | if (!svm_data) { | 336 | if (!sd) { |
| 337 | printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", | 337 | printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n", |
| 338 | me); | 338 | me); |
| 339 | return -EINVAL; | 339 | return -EINVAL; |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | svm_data->asid_generation = 1; | 342 | sd->asid_generation = 1; |
| 343 | svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; | 343 | sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; |
| 344 | svm_data->next_asid = svm_data->max_asid + 1; | 344 | sd->next_asid = sd->max_asid + 1; |
| 345 | 345 | ||
| 346 | kvm_get_gdt(&gdt_descr); | 346 | kvm_get_gdt(&gdt_descr); |
| 347 | gdt = (struct desc_struct *)gdt_descr.base; | 347 | gdt = (struct desc_struct *)gdt_descr.base; |
| 348 | svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); | 348 | sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); |
| 349 | 349 | ||
| 350 | wrmsrl(MSR_EFER, efer | EFER_SVME); | 350 | wrmsrl(MSR_EFER, efer | EFER_SVME); |
| 351 | 351 | ||
| 352 | wrmsrl(MSR_VM_HSAVE_PA, | 352 | wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT); |
| 353 | page_to_pfn(svm_data->save_area) << PAGE_SHIFT); | ||
| 354 | 353 | ||
| 355 | return 0; | 354 | return 0; |
| 356 | } | 355 | } |
| 357 | 356 | ||
| 358 | static void svm_cpu_uninit(int cpu) | 357 | static void svm_cpu_uninit(int cpu) |
| 359 | { | 358 | { |
| 360 | struct svm_cpu_data *svm_data | 359 | struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id()); |
| 361 | = per_cpu(svm_data, raw_smp_processor_id()); | ||
| 362 | 360 | ||
| 363 | if (!svm_data) | 361 | if (!sd) |
| 364 | return; | 362 | return; |
| 365 | 363 | ||
| 366 | per_cpu(svm_data, raw_smp_processor_id()) = NULL; | 364 | per_cpu(svm_data, raw_smp_processor_id()) = NULL; |
| 367 | __free_page(svm_data->save_area); | 365 | __free_page(sd->save_area); |
| 368 | kfree(svm_data); | 366 | kfree(sd); |
| 369 | } | 367 | } |
| 370 | 368 | ||
| 371 | static int svm_cpu_init(int cpu) | 369 | static int svm_cpu_init(int cpu) |
| 372 | { | 370 | { |
| 373 | struct svm_cpu_data *svm_data; | 371 | struct svm_cpu_data *sd; |
| 374 | int r; | 372 | int r; |
| 375 | 373 | ||
| 376 | svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); | 374 | sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); |
| 377 | if (!svm_data) | 375 | if (!sd) |
| 378 | return -ENOMEM; | 376 | return -ENOMEM; |
| 379 | svm_data->cpu = cpu; | 377 | sd->cpu = cpu; |
| 380 | svm_data->save_area = alloc_page(GFP_KERNEL); | 378 | sd->save_area = alloc_page(GFP_KERNEL); |
| 381 | r = -ENOMEM; | 379 | r = -ENOMEM; |
| 382 | if (!svm_data->save_area) | 380 | if (!sd->save_area) |
| 383 | goto err_1; | 381 | goto err_1; |
| 384 | 382 | ||
| 385 | per_cpu(svm_data, cpu) = svm_data; | 383 | per_cpu(svm_data, cpu) = sd; |
| 386 | 384 | ||
| 387 | return 0; | 385 | return 0; |
| 388 | 386 | ||
| 389 | err_1: | 387 | err_1: |
| 390 | kfree(svm_data); | 388 | kfree(sd); |
| 391 | return r; | 389 | return r; |
| 392 | 390 | ||
| 393 | } | 391 | } |
| @@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu) | |||
| 1092 | #endif | 1090 | #endif |
| 1093 | } | 1091 | } |
| 1094 | 1092 | ||
| 1095 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) | 1093 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
| 1096 | { | 1094 | { |
| 1097 | if (svm_data->next_asid > svm_data->max_asid) { | 1095 | if (sd->next_asid > sd->max_asid) { |
| 1098 | ++svm_data->asid_generation; | 1096 | ++sd->asid_generation; |
| 1099 | svm_data->next_asid = 1; | 1097 | sd->next_asid = 1; |
| 1100 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; | 1098 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
| 1101 | } | 1099 | } |
| 1102 | 1100 | ||
| 1103 | svm->asid_generation = svm_data->asid_generation; | 1101 | svm->asid_generation = sd->asid_generation; |
| 1104 | svm->vmcb->control.asid = svm_data->next_asid++; | 1102 | svm->vmcb->control.asid = sd->next_asid++; |
| 1105 | } | 1103 | } |
| 1106 | 1104 | ||
| 1107 | static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) | 1105 | static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr) |
| @@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu) | |||
| 2429 | { | 2427 | { |
| 2430 | int cpu = raw_smp_processor_id(); | 2428 | int cpu = raw_smp_processor_id(); |
| 2431 | 2429 | ||
| 2432 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | 2430 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 2433 | svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */ | 2431 | sd->tss_desc->type = 9; /* available 32/64-bit TSS */ |
| 2434 | load_TR_desc(); | 2432 | load_TR_desc(); |
| 2435 | } | 2433 | } |
| 2436 | 2434 | ||
| @@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm) | |||
| 2438 | { | 2436 | { |
| 2439 | int cpu = raw_smp_processor_id(); | 2437 | int cpu = raw_smp_processor_id(); |
| 2440 | 2438 | ||
| 2441 | struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); | 2439 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 2442 | 2440 | ||
| 2443 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; | 2441 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
| 2444 | /* FIXME: handle wraparound of asid_generation */ | 2442 | /* FIXME: handle wraparound of asid_generation */ |
| 2445 | if (svm->asid_generation != svm_data->asid_generation) | 2443 | if (svm->asid_generation != sd->asid_generation) |
| 2446 | new_asid(svm, svm_data); | 2444 | new_asid(svm, sd); |
| 2447 | } | 2445 | } |
| 2448 | 2446 | ||
| 2449 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) | 2447 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
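The bulk rename in svm.c is mechanical but purposeful: locals named svm_data shadowed the per-CPU variable of the same name. With the per_cpu__ symbol prefix of this era the old accessors still worked, but accessors that name the bare symbol would resolve to the local instead (my reading of the motivation; the short name sd sidesteps the shadowing). The hazard in miniature:

```c
#include <linux/percpu.h>

struct svm_cpu_data;				/* driver-internal type */
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static struct svm_cpu_data *get_sd(int cpu)
{
	/*
	 * Were this local also called svm_data, any accessor that
	 * references the plain symbol name would pick up the local.
	 */
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	return sd;
}
```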
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index a2d6472895fb..45b20e486c2f 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk | 5 | inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk |
| 6 | inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt | 6 | inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt |
| 7 | quiet_cmd_inat_tables = GEN $@ | 7 | quiet_cmd_inat_tables = GEN $@ |
| 8 | cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ | 8 | cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@ |
| 9 | 9 | ||
| 10 | $(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) | 10 | $(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) |
| 11 | $(call cmd,inat_tables) | 11 | $(call cmd,inat_tables) |
| @@ -20,7 +20,7 @@ lib-y := delay.o | |||
| 20 | lib-y += thunk_$(BITS).o | 20 | lib-y += thunk_$(BITS).o |
| 21 | lib-y += usercopy_$(BITS).o getuser.o putuser.o | 21 | lib-y += usercopy_$(BITS).o getuser.o putuser.o |
| 22 | lib-y += memcpy_$(BITS).o | 22 | lib-y += memcpy_$(BITS).o |
| 23 | lib-y += insn.o inat.o | 23 | lib-$(CONFIG_KPROBES) += insn.o inat.o |
| 24 | 24 | ||
| 25 | obj-y += msr-reg.o msr-reg-export.o | 25 | obj-y += msr-reg.o msr-reg-export.o |
| 26 | 26 | ||
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 41628b104b9e..872834177937 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c | |||
| @@ -7,7 +7,6 @@ struct msr_info { | |||
| 7 | u32 msr_no; | 7 | u32 msr_no; |
| 8 | struct msr reg; | 8 | struct msr reg; |
| 9 | struct msr *msrs; | 9 | struct msr *msrs; |
| 10 | int off; | ||
| 11 | int err; | 10 | int err; |
| 12 | }; | 11 | }; |
| 13 | 12 | ||
| @@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info) | |||
| 18 | int this_cpu = raw_smp_processor_id(); | 17 | int this_cpu = raw_smp_processor_id(); |
| 19 | 18 | ||
| 20 | if (rv->msrs) | 19 | if (rv->msrs) |
| 21 | reg = &rv->msrs[this_cpu - rv->off]; | 20 | reg = per_cpu_ptr(rv->msrs, this_cpu); |
| 22 | else | 21 | else |
| 23 | reg = &rv->reg; | 22 | reg = &rv->reg; |
| 24 | 23 | ||
| @@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info) | |||
| 32 | int this_cpu = raw_smp_processor_id(); | 31 | int this_cpu = raw_smp_processor_id(); |
| 33 | 32 | ||
| 34 | if (rv->msrs) | 33 | if (rv->msrs) |
| 35 | reg = &rv->msrs[this_cpu - rv->off]; | 34 | reg = per_cpu_ptr(rv->msrs, this_cpu); |
| 36 | else | 35 | else |
| 37 | reg = &rv->reg; | 36 | reg = &rv->reg; |
| 38 | 37 | ||
| @@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, | |||
| 80 | 79 | ||
| 81 | memset(&rv, 0, sizeof(rv)); | 80 | memset(&rv, 0, sizeof(rv)); |
| 82 | 81 | ||
| 83 | rv.off = cpumask_first(mask); | ||
| 84 | rv.msrs = msrs; | 82 | rv.msrs = msrs; |
| 85 | rv.msr_no = msr_no; | 83 | rv.msr_no = msr_no; |
| 86 | 84 | ||
| @@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) | |||
| 120 | } | 118 | } |
| 121 | EXPORT_SYMBOL(wrmsr_on_cpus); | 119 | EXPORT_SYMBOL(wrmsr_on_cpus); |
| 122 | 120 | ||
| 121 | struct msr *msrs_alloc(void) | ||
| 122 | { | ||
| 123 | struct msr *msrs = NULL; | ||
| 124 | |||
| 125 | msrs = alloc_percpu(struct msr); | ||
| 126 | if (!msrs) { | ||
| 127 | pr_warning("%s: error allocating msrs\n", __func__); | ||
| 128 | return NULL; | ||
| 129 | } | ||
| 130 | |||
| 131 | return msrs; | ||
| 132 | } | ||
| 133 | EXPORT_SYMBOL(msrs_alloc); | ||
| 134 | |||
| 135 | void msrs_free(struct msr *msrs) | ||
| 136 | { | ||
| 137 | free_percpu(msrs); | ||
| 138 | } | ||
| 139 | EXPORT_SYMBOL(msrs_free); | ||
| 140 | |||
| 123 | /* These "safe" variants are slower and should be used when the target MSR | 141 | /* These "safe" variants are slower and should be used when the target MSR |
| 124 | may not actually exist. */ | 142 | may not actually exist. */ |
| 125 | static void __rdmsr_safe_on_cpu(void *info) | 143 | static void __rdmsr_safe_on_cpu(void *info) |
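rdmsr_on_cpus()/wrmsr_on_cpus() previously took a plain struct msr array and located each CPU's slot as msrs[this_cpu - rv->off], with off seeded from the first CPU in the mask, which is fragile for sparse or non-zero-based masks. Results now live in a percpu allocation addressed with per_cpu_ptr(), wrapped by the new msrs_alloc()/msrs_free() exports. A caller's-eye sketch (dump_msr_everywhere() is illustrative):

```c
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

static int dump_msr_everywhere(u32 msr_no)
{
	int cpu;
	struct msr *msrs = msrs_alloc();	/* alloc_percpu(struct msr) */

	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	for_each_online_cpu(cpu)
		pr_info("cpu%d: %#llx\n", cpu,
			(unsigned long long)per_cpu_ptr(msrs, cpu)->q);

	msrs_free(msrs);
	return 0;
}
```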
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 07bcc309cfda..c0f6198565eb 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c | |||
| @@ -5,6 +5,8 @@ | |||
| 5 | * 2008 Pekka Paalanen <pq@iki.fi> | 5 | * 2008 Pekka Paalanen <pq@iki.fi> |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 9 | |||
| 8 | #include <linux/list.h> | 10 | #include <linux/list.h> |
| 9 | #include <linux/rculist.h> | 11 | #include <linux/rculist.h> |
| 10 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
| @@ -136,7 +138,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) | |||
| 136 | pte_t *pte = lookup_address(f->page, &level); | 138 | pte_t *pte = lookup_address(f->page, &level); |
| 137 | 139 | ||
| 138 | if (!pte) { | 140 | if (!pte) { |
| 139 | pr_err("kmmio: no pte for page 0x%08lx\n", f->page); | 141 | pr_err("no pte for page 0x%08lx\n", f->page); |
| 140 | return -1; | 142 | return -1; |
| 141 | } | 143 | } |
| 142 | 144 | ||
| @@ -148,7 +150,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) | |||
| 148 | clear_pte_presence(pte, clear, &f->old_presence); | 150 | clear_pte_presence(pte, clear, &f->old_presence); |
| 149 | break; | 151 | break; |
| 150 | default: | 152 | default: |
| 151 | pr_err("kmmio: unexpected page level 0x%x.\n", level); | 153 | pr_err("unexpected page level 0x%x.\n", level); |
| 152 | return -1; | 154 | return -1; |
| 153 | } | 155 | } |
| 154 | 156 | ||
| @@ -170,13 +172,14 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) | |||
| 170 | static int arm_kmmio_fault_page(struct kmmio_fault_page *f) | 172 | static int arm_kmmio_fault_page(struct kmmio_fault_page *f) |
| 171 | { | 173 | { |
| 172 | int ret; | 174 | int ret; |
| 173 | WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n"); | 175 | WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); |
| 174 | if (f->armed) { | 176 | if (f->armed) { |
| 175 | pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n", | 177 | pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n", |
| 176 | f->page, f->count, !!f->old_presence); | 178 | f->page, f->count, !!f->old_presence); |
| 177 | } | 179 | } |
| 178 | ret = clear_page_presence(f, true); | 180 | ret = clear_page_presence(f, true); |
| 179 | WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page); | 181 | WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"), |
| 182 | f->page); | ||
| 180 | f->armed = true; | 183 | f->armed = true; |
| 181 | return ret; | 184 | return ret; |
| 182 | } | 185 | } |
| @@ -240,24 +243,21 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr) | |||
| 240 | * condition needs handling by do_page_fault(), the | 243 | * condition needs handling by do_page_fault(), the |
| 241 | * page really not being present is the most common. | 244 | * page really not being present is the most common. |
| 242 | */ | 245 | */ |
| 243 | pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n", | 246 | pr_debug("secondary hit for 0x%08lx CPU %d.\n", |
| 244 | addr, smp_processor_id()); | 247 | addr, smp_processor_id()); |
| 245 | 248 | ||
| 246 | if (!faultpage->old_presence) | 249 | if (!faultpage->old_presence) |
| 247 | pr_info("kmmio: unexpected secondary hit for " | 250 | pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n", |
| 248 | "address 0x%08lx on CPU %d.\n", addr, | 251 | addr, smp_processor_id()); |
| 249 | smp_processor_id()); | ||
| 250 | } else { | 252 | } else { |
| 251 | /* | 253 | /* |
| 252 | * Prevent overwriting already in-flight context. | 254 | * Prevent overwriting already in-flight context. |
| 253 | * This should not happen, let's hope disarming at | 255 | * This should not happen, let's hope disarming at |
| 254 | * least prevents a panic. | 256 | * least prevents a panic. |
| 255 | */ | 257 | */ |
| 256 | pr_emerg("kmmio: recursive probe hit on CPU %d, " | 258 | pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n", |
| 257 | "for address 0x%08lx. Ignoring.\n", | 259 | smp_processor_id(), addr); |
| 258 | smp_processor_id(), addr); | 260 | pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr); |
| 259 | pr_emerg("kmmio: previous hit was at 0x%08lx.\n", | ||
| 260 | ctx->addr); | ||
| 261 | disarm_kmmio_fault_page(faultpage); | 261 | disarm_kmmio_fault_page(faultpage); |
| 262 | } | 262 | } |
| 263 | goto no_kmmio_ctx; | 263 | goto no_kmmio_ctx; |
| @@ -316,8 +316,8 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) | |||
| 316 | * something external causing them (f.e. using a debugger while | 316 | * something external causing them (f.e. using a debugger while |
| 317 | * mmio tracing enabled), or erroneous behaviour | 317 | * mmio tracing enabled), or erroneous behaviour |
| 318 | */ | 318 | */ |
| 319 | pr_warning("kmmio: unexpected debug trap on CPU %d.\n", | 319 | pr_warning("unexpected debug trap on CPU %d.\n", |
| 320 | smp_processor_id()); | 320 | smp_processor_id()); |
| 321 | goto out; | 321 | goto out; |
| 322 | } | 322 | } |
| 323 | 323 | ||
| @@ -425,7 +425,7 @@ int register_kmmio_probe(struct kmmio_probe *p) | |||
| 425 | list_add_rcu(&p->list, &kmmio_probes); | 425 | list_add_rcu(&p->list, &kmmio_probes); |
| 426 | while (size < size_lim) { | 426 | while (size < size_lim) { |
| 427 | if (add_kmmio_fault_page(p->addr + size)) | 427 | if (add_kmmio_fault_page(p->addr + size)) |
| 428 | pr_err("kmmio: Unable to set page fault.\n"); | 428 | pr_err("Unable to set page fault.\n"); |
| 429 | size += PAGE_SIZE; | 429 | size += PAGE_SIZE; |
| 430 | } | 430 | } |
| 431 | out: | 431 | out: |
| @@ -490,7 +490,7 @@ static void remove_kmmio_fault_pages(struct rcu_head *head) | |||
| 490 | * 2. remove_kmmio_fault_pages() | 490 | * 2. remove_kmmio_fault_pages() |
| 491 | * Remove the pages from kmmio_page_table. | 491 | * Remove the pages from kmmio_page_table. |
| 492 | * 3. rcu_free_kmmio_fault_pages() | 492 | * 3. rcu_free_kmmio_fault_pages() |
| 493 | * Actally free the kmmio_fault_page structs as with RCU. | 493 | * Actually free the kmmio_fault_page structs as with RCU. |
| 494 | */ | 494 | */ |
| 495 | void unregister_kmmio_probe(struct kmmio_probe *p) | 495 | void unregister_kmmio_probe(struct kmmio_probe *p) |
| 496 | { | 496 | { |
| @@ -511,7 +511,7 @@ void unregister_kmmio_probe(struct kmmio_probe *p) | |||
| 511 | 511 | ||
| 512 | drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); | 512 | drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); |
| 513 | if (!drelease) { | 513 | if (!drelease) { |
| 514 | pr_crit("kmmio: leaking kmmio_fault_page objects.\n"); | 514 | pr_crit("leaking kmmio_fault_page objects.\n"); |
| 515 | return; | 515 | return; |
| 516 | } | 516 | } |
| 517 | drelease->release_list = release_list; | 517 | drelease->release_list = release_list; |
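The kmmio.c conversion drops the hand-written "kmmio: " prefixes in favor of a pr_fmt() definition, which every pr_*() macro pastes in front of its format string at compile time. A rough sketch of the mechanism (the #define must appear before the first #include so it overrides the default empty pr_fmt):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/module.h>

    static void demo(unsigned long page)
    {
            /* emits "<modname>: no pte for page 0x..." */
            pr_err("no pte for page 0x%08lx\n", page);

            /* WARN_ONCE() bypasses the pr_* wrappers, so the prefix has
             * to be applied by hand, as the hunks above do: */
            WARN_ONCE(1, KERN_ERR pr_fmt("kmmio page already armed.\n"));
    }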
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 132772a8ec57..34a3291ca103 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
| @@ -19,6 +19,9 @@ | |||
| 19 | * | 19 | * |
| 20 | * Derived from the read-mod example from relay-examples by Tom Zanussi. | 20 | * Derived from the read-mod example from relay-examples by Tom Zanussi. |
| 21 | */ | 21 | */ |
| 22 | |||
| 23 | #define pr_fmt(fmt) "mmiotrace: " fmt | ||
| 24 | |||
| 22 | #define DEBUG 1 | 25 | #define DEBUG 1 |
| 23 | 26 | ||
| 24 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| @@ -36,8 +39,6 @@ | |||
| 36 | 39 | ||
| 37 | #include "pf_in.h" | 40 | #include "pf_in.h" |
| 38 | 41 | ||
| 39 | #define NAME "mmiotrace: " | ||
| 40 | |||
| 41 | struct trap_reason { | 42 | struct trap_reason { |
| 42 | unsigned long addr; | 43 | unsigned long addr; |
| 43 | unsigned long ip; | 44 | unsigned long ip; |
| @@ -96,17 +97,18 @@ static void print_pte(unsigned long address) | |||
| 96 | pte_t *pte = lookup_address(address, &level); | 97 | pte_t *pte = lookup_address(address, &level); |
| 97 | 98 | ||
| 98 | if (!pte) { | 99 | if (!pte) { |
| 99 | pr_err(NAME "Error in %s: no pte for page 0x%08lx\n", | 100 | pr_err("Error in %s: no pte for page 0x%08lx\n", |
| 100 | __func__, address); | 101 | __func__, address); |
| 101 | return; | 102 | return; |
| 102 | } | 103 | } |
| 103 | 104 | ||
| 104 | if (level == PG_LEVEL_2M) { | 105 | if (level == PG_LEVEL_2M) { |
| 105 | pr_emerg(NAME "4MB pages are not currently supported: " | 106 | pr_emerg("4MB pages are not currently supported: 0x%08lx\n", |
| 106 | "0x%08lx\n", address); | 107 | address); |
| 107 | BUG(); | 108 | BUG(); |
| 108 | } | 109 | } |
| 109 | pr_info(NAME "pte for 0x%lx: 0x%llx 0x%llx\n", address, | 110 | pr_info("pte for 0x%lx: 0x%llx 0x%llx\n", |
| 111 | address, | ||
| 110 | (unsigned long long)pte_val(*pte), | 112 | (unsigned long long)pte_val(*pte), |
| 111 | (unsigned long long)pte_val(*pte) & _PAGE_PRESENT); | 113 | (unsigned long long)pte_val(*pte) & _PAGE_PRESENT); |
| 112 | } | 114 | } |
| @@ -118,22 +120,21 @@ static void print_pte(unsigned long address) | |||
| 118 | static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr) | 120 | static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr) |
| 119 | { | 121 | { |
| 120 | const struct trap_reason *my_reason = &get_cpu_var(pf_reason); | 122 | const struct trap_reason *my_reason = &get_cpu_var(pf_reason); |
| 121 | pr_emerg(NAME "unexpected fault for address: 0x%08lx, " | 123 | pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n", |
| 122 | "last fault for address: 0x%08lx\n", | 124 | addr, my_reason->addr); |
| 123 | addr, my_reason->addr); | ||
| 124 | print_pte(addr); | 125 | print_pte(addr); |
| 125 | print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip); | 126 | print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip); |
| 126 | print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip); | 127 | print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip); |
| 127 | #ifdef __i386__ | 128 | #ifdef __i386__ |
| 128 | pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", | 129 | pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", |
| 129 | regs->ax, regs->bx, regs->cx, regs->dx); | 130 | regs->ax, regs->bx, regs->cx, regs->dx); |
| 130 | pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", | 131 | pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", |
| 131 | regs->si, regs->di, regs->bp, regs->sp); | 132 | regs->si, regs->di, regs->bp, regs->sp); |
| 132 | #else | 133 | #else |
| 133 | pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n", | 134 | pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n", |
| 134 | regs->ax, regs->cx, regs->dx); | 135 | regs->ax, regs->cx, regs->dx); |
| 135 | pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n", | 136 | pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n", |
| 136 | regs->si, regs->di, regs->bp, regs->sp); | 137 | regs->si, regs->di, regs->bp, regs->sp); |
| 137 | #endif | 138 | #endif |
| 138 | put_cpu_var(pf_reason); | 139 | put_cpu_var(pf_reason); |
| 139 | BUG(); | 140 | BUG(); |
| @@ -213,7 +214,7 @@ static void post(struct kmmio_probe *p, unsigned long condition, | |||
| 213 | /* this should always return the active_trace count to 0 */ | 214 | /* this should always return the active_trace count to 0 */ |
| 214 | my_reason->active_traces--; | 215 | my_reason->active_traces--; |
| 215 | if (my_reason->active_traces) { | 216 | if (my_reason->active_traces) { |
| 216 | pr_emerg(NAME "unexpected post handler"); | 217 | pr_emerg("unexpected post handler"); |
| 217 | BUG(); | 218 | BUG(); |
| 218 | } | 219 | } |
| 219 | 220 | ||
| @@ -244,7 +245,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size, | |||
| 244 | }; | 245 | }; |
| 245 | 246 | ||
| 246 | if (!trace) { | 247 | if (!trace) { |
| 247 | pr_err(NAME "kmalloc failed in ioremap\n"); | 248 | pr_err("kmalloc failed in ioremap\n"); |
| 248 | return; | 249 | return; |
| 249 | } | 250 | } |
| 250 | 251 | ||
| @@ -282,8 +283,8 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size, | |||
| 282 | if (!is_enabled()) /* recheck and proper locking in *_core() */ | 283 | if (!is_enabled()) /* recheck and proper locking in *_core() */ |
| 283 | return; | 284 | return; |
| 284 | 285 | ||
| 285 | pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n", | 286 | pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n", |
| 286 | (unsigned long long)offset, size, addr); | 287 | (unsigned long long)offset, size, addr); |
| 287 | if ((filter_offset) && (offset != filter_offset)) | 288 | if ((filter_offset) && (offset != filter_offset)) |
| 288 | return; | 289 | return; |
| 289 | ioremap_trace_core(offset, size, addr); | 290 | ioremap_trace_core(offset, size, addr); |
| @@ -301,7 +302,7 @@ static void iounmap_trace_core(volatile void __iomem *addr) | |||
| 301 | struct remap_trace *tmp; | 302 | struct remap_trace *tmp; |
| 302 | struct remap_trace *found_trace = NULL; | 303 | struct remap_trace *found_trace = NULL; |
| 303 | 304 | ||
| 304 | pr_debug(NAME "Unmapping %p.\n", addr); | 305 | pr_debug("Unmapping %p.\n", addr); |
| 305 | 306 | ||
| 306 | spin_lock_irq(&trace_lock); | 307 | spin_lock_irq(&trace_lock); |
| 307 | if (!is_enabled()) | 308 | if (!is_enabled()) |
| @@ -363,9 +364,8 @@ static void clear_trace_list(void) | |||
| 363 | * Caller also ensures is_enabled() cannot change. | 364 | * Caller also ensures is_enabled() cannot change. |
| 364 | */ | 365 | */ |
| 365 | list_for_each_entry(trace, &trace_list, list) { | 366 | list_for_each_entry(trace, &trace_list, list) { |
| 366 | pr_notice(NAME "purging non-iounmapped " | 367 | pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n", |
| 367 | "trace @0x%08lx, size 0x%lx.\n", | 368 | trace->probe.addr, trace->probe.len); |
| 368 | trace->probe.addr, trace->probe.len); | ||
| 369 | if (!nommiotrace) | 369 | if (!nommiotrace) |
| 370 | unregister_kmmio_probe(&trace->probe); | 370 | unregister_kmmio_probe(&trace->probe); |
| 371 | } | 371 | } |
| @@ -387,7 +387,7 @@ static void enter_uniprocessor(void) | |||
| 387 | 387 | ||
| 388 | if (downed_cpus == NULL && | 388 | if (downed_cpus == NULL && |
| 389 | !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { | 389 | !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { |
| 390 | pr_notice(NAME "Failed to allocate mask\n"); | 390 | pr_notice("Failed to allocate mask\n"); |
| 391 | goto out; | 391 | goto out; |
| 392 | } | 392 | } |
| 393 | 393 | ||
| @@ -395,20 +395,19 @@ static void enter_uniprocessor(void) | |||
| 395 | cpumask_copy(downed_cpus, cpu_online_mask); | 395 | cpumask_copy(downed_cpus, cpu_online_mask); |
| 396 | cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus); | 396 | cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus); |
| 397 | if (num_online_cpus() > 1) | 397 | if (num_online_cpus() > 1) |
| 398 | pr_notice(NAME "Disabling non-boot CPUs...\n"); | 398 | pr_notice("Disabling non-boot CPUs...\n"); |
| 399 | put_online_cpus(); | 399 | put_online_cpus(); |
| 400 | 400 | ||
| 401 | for_each_cpu(cpu, downed_cpus) { | 401 | for_each_cpu(cpu, downed_cpus) { |
| 402 | err = cpu_down(cpu); | 402 | err = cpu_down(cpu); |
| 403 | if (!err) | 403 | if (!err) |
| 404 | pr_info(NAME "CPU%d is down.\n", cpu); | 404 | pr_info("CPU%d is down.\n", cpu); |
| 405 | else | 405 | else |
| 406 | pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err); | 406 | pr_err("Error taking CPU%d down: %d\n", cpu, err); |
| 407 | } | 407 | } |
| 408 | out: | 408 | out: |
| 409 | if (num_online_cpus() > 1) | 409 | if (num_online_cpus() > 1) |
| 410 | pr_warning(NAME "multiple CPUs still online, " | 410 | pr_warning("multiple CPUs still online, may miss events.\n"); |
| 411 | "may miss events.\n"); | ||
| 412 | } | 411 | } |
| 413 | 412 | ||
| 414 | /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, | 413 | /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, |
| @@ -420,13 +419,13 @@ static void __ref leave_uniprocessor(void) | |||
| 420 | 419 | ||
| 421 | if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) | 420 | if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) |
| 422 | return; | 421 | return; |
| 423 | pr_notice(NAME "Re-enabling CPUs...\n"); | 422 | pr_notice("Re-enabling CPUs...\n"); |
| 424 | for_each_cpu(cpu, downed_cpus) { | 423 | for_each_cpu(cpu, downed_cpus) { |
| 425 | err = cpu_up(cpu); | 424 | err = cpu_up(cpu); |
| 426 | if (!err) | 425 | if (!err) |
| 427 | pr_info(NAME "enabled CPU%d.\n", cpu); | 426 | pr_info("enabled CPU%d.\n", cpu); |
| 428 | else | 427 | else |
| 429 | pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err); | 428 | pr_err("cannot re-enable CPU%d: %d\n", cpu, err); |
| 430 | } | 429 | } |
| 431 | } | 430 | } |
| 432 | 431 | ||
| @@ -434,8 +433,8 @@ static void __ref leave_uniprocessor(void) | |||
| 434 | static void enter_uniprocessor(void) | 433 | static void enter_uniprocessor(void) |
| 435 | { | 434 | { |
| 436 | if (num_online_cpus() > 1) | 435 | if (num_online_cpus() > 1) |
| 437 | pr_warning(NAME "multiple CPUs are online, may miss events. " | 436 | pr_warning("multiple CPUs are online, may miss events. " |
| 438 | "Suggest booting with maxcpus=1 kernel argument.\n"); | 437 | "Suggest booting with maxcpus=1 kernel argument.\n"); |
| 439 | } | 438 | } |
| 440 | 439 | ||
| 441 | static void leave_uniprocessor(void) | 440 | static void leave_uniprocessor(void) |
| @@ -450,13 +449,13 @@ void enable_mmiotrace(void) | |||
| 450 | goto out; | 449 | goto out; |
| 451 | 450 | ||
| 452 | if (nommiotrace) | 451 | if (nommiotrace) |
| 453 | pr_info(NAME "MMIO tracing disabled.\n"); | 452 | pr_info("MMIO tracing disabled.\n"); |
| 454 | kmmio_init(); | 453 | kmmio_init(); |
| 455 | enter_uniprocessor(); | 454 | enter_uniprocessor(); |
| 456 | spin_lock_irq(&trace_lock); | 455 | spin_lock_irq(&trace_lock); |
| 457 | atomic_inc(&mmiotrace_enabled); | 456 | atomic_inc(&mmiotrace_enabled); |
| 458 | spin_unlock_irq(&trace_lock); | 457 | spin_unlock_irq(&trace_lock); |
| 459 | pr_info(NAME "enabled.\n"); | 458 | pr_info("enabled.\n"); |
| 460 | out: | 459 | out: |
| 461 | mutex_unlock(&mmiotrace_mutex); | 460 | mutex_unlock(&mmiotrace_mutex); |
| 462 | } | 461 | } |
| @@ -475,7 +474,7 @@ void disable_mmiotrace(void) | |||
| 475 | clear_trace_list(); /* guarantees: no more kmmio callbacks */ | 474 | clear_trace_list(); /* guarantees: no more kmmio callbacks */ |
| 476 | leave_uniprocessor(); | 475 | leave_uniprocessor(); |
| 477 | kmmio_cleanup(); | 476 | kmmio_cleanup(); |
| 478 | pr_info(NAME "disabled.\n"); | 477 | pr_info("disabled.\n"); |
| 479 | out: | 478 | out: |
| 480 | mutex_unlock(&mmiotrace_mutex); | 479 | mutex_unlock(&mmiotrace_mutex); |
| 481 | } | 480 | } |
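Besides the pr_fmt() cleanup, the mmio-mod.c hunks show the get_cpu_var()/put_cpu_var() pairing around the per-CPU pf_reason bookkeeping. An illustrative reduction of that pattern (names here are hypothetical):

    #include <linux/percpu.h>

    struct reason_demo {
            unsigned long addr;
            int active_traces;
    };

    static DEFINE_PER_CPU(struct reason_demo, demo_reason);

    static void record_fault(unsigned long addr)
    {
            /* get_cpu_var() disables preemption, so the reference stays
             * bound to this CPU until the matching put_cpu_var() */
            struct reason_demo *r = &get_cpu_var(demo_reason);
            r->addr = addr;
            r->active_traces++;
            put_cpu_var(demo_reason);
    }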
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 66b55d6e69ed..ae9648eb1c7f 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
| @@ -704,9 +704,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, | |||
| 704 | if (!range_is_allowed(pfn, size)) | 704 | if (!range_is_allowed(pfn, size)) |
| 705 | return 0; | 705 | return 0; |
| 706 | 706 | ||
| 707 | if (file->f_flags & O_SYNC) { | 707 | if (file->f_flags & O_DSYNC) |
| 708 | flags = _PAGE_CACHE_UC_MINUS; | 708 | flags = _PAGE_CACHE_UC_MINUS; |
| 709 | } | ||
| 710 | 709 | ||
| 711 | #ifdef CONFIG_X86_32 | 710 | #ifdef CONFIG_X86_32 |
| 712 | /* | 711 | /* |
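The pat.c hunk keys the uncached mapping off O_DSYNC rather than O_SYNC; with the O_SYNC/O_DSYNC split, O_SYNC is defined to include the O_DSYNC bit, so existing users keep working. A hedged userspace sketch of the behaviour being preserved (assuming a kernel and libc with the split definitions):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    void *map_uncached(off_t phys, size_t len)
    {
            /* O_SYNC carries the O_DSYNC bit, so this still selects the
             * _PAGE_CACHE_UC_MINUS attribute tested above */
            int fd = open("/dev/mem", O_RDWR | O_SYNC);
            void *p;

            if (fd < 0)
                    return MAP_FAILED;
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
            close(fd);              /* the mapping survives the close */
            return p;
    }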
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index d49202e740ea..564b008a51c7 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile | |||
| @@ -15,3 +15,8 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o | |||
| 15 | 15 | ||
| 16 | obj-y += common.o early.o | 16 | obj-y += common.o early.o |
| 17 | obj-y += amd_bus.o | 17 | obj-y += amd_bus.o |
| 18 | obj-$(CONFIG_X86_64) += bus_numa.o intel_bus.o | ||
| 19 | |||
| 20 | ifeq ($(CONFIG_PCI_DEBUG),y) | ||
| 21 | EXTRA_CFLAGS += -DDEBUG | ||
| 22 | endif | ||
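The Makefile addition compiles arch/x86/pci with -DDEBUG whenever CONFIG_PCI_DEBUG is set, which is what turns the pr_debug() calls in these files into real output. A simplified model of the gating (the in-tree definition also handles CONFIG_DYNAMIC_DEBUG):

    #ifdef DEBUG
    #define pr_debug_demo(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #else
    #define pr_debug_demo(fmt, ...) do { } while (0)   /* compiled away */
    #endif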
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 1014eb4bfc37..959e548a7039 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <asm/pci_x86.h> | 7 | #include <asm/pci_x86.h> |
| 8 | 8 | ||
| 9 | struct pci_root_info { | 9 | struct pci_root_info { |
| 10 | struct acpi_device *bridge; | ||
| 10 | char *name; | 11 | char *name; |
| 11 | unsigned int res_num; | 12 | unsigned int res_num; |
| 12 | struct resource *res; | 13 | struct resource *res; |
| @@ -58,6 +59,30 @@ bus_has_transparent_bridge(struct pci_bus *bus) | |||
| 58 | return false; | 59 | return false; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 62 | static void | ||
| 63 | align_resource(struct acpi_device *bridge, struct resource *res) | ||
| 64 | { | ||
| 65 | int align = (res->flags & IORESOURCE_MEM) ? 16 : 4; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Host bridge windows are not BARs, but the decoders on the PCI side | ||
| 69 | * that claim this address space have starting alignment and length | ||
| 70 | * constraints, so fix any obvious BIOS goofs. | ||
| 71 | */ | ||
| 72 | if (!IS_ALIGNED(res->start, align)) { | ||
| 73 | dev_printk(KERN_DEBUG, &bridge->dev, | ||
| 74 | "host bridge window %pR invalid; " | ||
| 75 | "aligning start to %d-byte boundary\n", res, align); | ||
| 76 | res->start &= ~(align - 1); | ||
| 77 | } | ||
| 78 | if (!IS_ALIGNED(res->end + 1, align)) { | ||
| 79 | dev_printk(KERN_DEBUG, &bridge->dev, | ||
| 80 | "host bridge window %pR invalid; " | ||
| 81 | "aligning end to %d-byte boundary\n", res, align); | ||
| 82 | res->end = ALIGN(res->end, align) - 1; | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 61 | static acpi_status | 86 | static acpi_status |
| 62 | setup_resource(struct acpi_resource *acpi_res, void *data) | 87 | setup_resource(struct acpi_resource *acpi_res, void *data) |
| 63 | { | 88 | { |
| @@ -91,11 +116,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
| 91 | start = addr.minimum + addr.translation_offset; | 116 | start = addr.minimum + addr.translation_offset; |
| 92 | end = start + addr.address_length - 1; | 117 | end = start + addr.address_length - 1; |
| 93 | if (info->res_num >= max_root_bus_resources) { | 118 | if (info->res_num >= max_root_bus_resources) { |
| 94 | printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx " | 119 | if (pci_probe & PCI_USE__CRS) |
| 95 | "from %s for %s due to _CRS returning more than " | 120 | printk(KERN_WARNING "PCI: Failed to allocate " |
| 96 | "%d resource descriptors\n", (unsigned long) start, | 121 | "0x%lx-0x%lx from %s for %s due to _CRS " |
| 97 | (unsigned long) end, root->name, info->name, | 122 | "returning more than %d resource descriptors\n", |
| 98 | max_root_bus_resources); | 123 | (unsigned long) start, (unsigned long) end, |
| 124 | root->name, info->name, max_root_bus_resources); | ||
| 99 | return AE_OK; | 125 | return AE_OK; |
| 100 | } | 126 | } |
| 101 | 127 | ||
| @@ -105,14 +131,28 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
| 105 | res->start = start; | 131 | res->start = start; |
| 106 | res->end = end; | 132 | res->end = end; |
| 107 | res->child = NULL; | 133 | res->child = NULL; |
| 134 | align_resource(info->bridge, res); | ||
| 135 | |||
| 136 | if (!(pci_probe & PCI_USE__CRS)) { | ||
| 137 | dev_printk(KERN_DEBUG, &info->bridge->dev, | ||
| 138 | "host bridge window %pR (ignored)\n", res); | ||
| 139 | return AE_OK; | ||
| 140 | } | ||
| 108 | 141 | ||
| 109 | if (insert_resource(root, res)) { | 142 | if (insert_resource(root, res)) { |
| 110 | printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx " | 143 | dev_err(&info->bridge->dev, |
| 111 | "from %s for %s\n", (unsigned long) res->start, | 144 | "can't allocate host bridge window %pR\n", res); |
| 112 | (unsigned long) res->end, root->name, info->name); | ||
| 113 | } else { | 145 | } else { |
| 114 | info->bus->resource[info->res_num] = res; | 146 | info->bus->resource[info->res_num] = res; |
| 115 | info->res_num++; | 147 | info->res_num++; |
| 148 | if (addr.translation_offset) | ||
| 149 | dev_info(&info->bridge->dev, "host bridge window %pR " | ||
| 150 | "(PCI address [%#llx-%#llx])\n", | ||
| 151 | res, res->start - addr.translation_offset, | ||
| 152 | res->end - addr.translation_offset); | ||
| 153 | else | ||
| 154 | dev_info(&info->bridge->dev, | ||
| 155 | "host bridge window %pR\n", res); | ||
| 116 | } | 156 | } |
| 117 | return AE_OK; | 157 | return AE_OK; |
| 118 | } | 158 | } |
| @@ -124,6 +164,12 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
| 124 | struct pci_root_info info; | 164 | struct pci_root_info info; |
| 125 | size_t size; | 165 | size_t size; |
| 126 | 166 | ||
| 167 | if (!(pci_probe & PCI_USE__CRS)) | ||
| 168 | dev_info(&device->dev, | ||
| 169 | "ignoring host bridge windows from ACPI; " | ||
| 170 | "boot with \"pci=use_crs\" to use them\n"); | ||
| 171 | |||
| 172 | info.bridge = device; | ||
| 127 | info.bus = bus; | 173 | info.bus = bus; |
| 128 | info.res_num = 0; | 174 | info.res_num = 0; |
| 129 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource, | 175 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource, |
| @@ -163,8 +209,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do | |||
| 163 | #endif | 209 | #endif |
| 164 | 210 | ||
| 165 | if (domain && !pci_domains_supported) { | 211 | if (domain && !pci_domains_supported) { |
| 166 | printk(KERN_WARNING "PCI: Multiple domains not supported " | 212 | printk(KERN_WARNING "pci_bus %04x:%02x: " |
| 167 | "(dom %d, bus %d)\n", domain, busnum); | 213 | "ignored (multiple domains not supported)\n", |
| 214 | domain, busnum); | ||
| 168 | return NULL; | 215 | return NULL; |
| 169 | } | 216 | } |
| 170 | 217 | ||
| @@ -188,7 +235,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do | |||
| 188 | */ | 235 | */ |
| 189 | sd = kzalloc(sizeof(*sd), GFP_KERNEL); | 236 | sd = kzalloc(sizeof(*sd), GFP_KERNEL); |
| 190 | if (!sd) { | 237 | if (!sd) { |
| 191 | printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum); | 238 | printk(KERN_WARNING "pci_bus %04x:%02x: " |
| 239 | "ignored (out of memory)\n", domain, busnum); | ||
| 192 | return NULL; | 240 | return NULL; |
| 193 | } | 241 | } |
| 194 | 242 | ||
| @@ -209,9 +257,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do | |||
| 209 | } else { | 257 | } else { |
| 210 | bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd); | 258 | bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd); |
| 211 | if (bus) { | 259 | if (bus) { |
| 212 | if (pci_probe & PCI_USE__CRS) | 260 | get_current_resources(device, busnum, domain, bus); |
| 213 | get_current_resources(device, busnum, domain, | ||
| 214 | bus); | ||
| 215 | bus->subordinate = pci_scan_child_bus(bus); | 261 | bus->subordinate = pci_scan_child_bus(bus); |
| 216 | } | 262 | } |
| 217 | } | 263 | } |
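The new align_resource() trusts the window contents but not the BIOS's arithmetic: an MMIO window must start on a 16-byte boundary and end just short of one (4-byte for I/O). A standalone worked example of the same rounding, using local stand-ins for the kernel's IS_ALIGNED()/ALIGN() helpers:

    #include <stdio.h>

    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
    #define IS_ALIGNED2(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long start = 0xa001, end = 0xbffe; /* bogus BIOS window */
            int align = 16;                             /* MMIO constraint */

            if (!IS_ALIGNED2(start, align))
                    start &= ~(unsigned long)(align - 1); /* 0xa001 -> 0xa000 */
            if (!IS_ALIGNED2(end + 1, align))
                    end = ALIGN_UP(end, align) - 1;       /* 0xbffe -> 0xbfff */

            printf("[%#lx-%#lx]\n", start, end);
            return 0;
    }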
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 572ee9782f2a..95ecbd495955 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
| @@ -6,10 +6,10 @@ | |||
| 6 | 6 | ||
| 7 | #ifdef CONFIG_X86_64 | 7 | #ifdef CONFIG_X86_64 |
| 8 | #include <asm/pci-direct.h> | 8 | #include <asm/pci-direct.h> |
| 9 | #include <asm/mpspec.h> | ||
| 10 | #include <linux/cpumask.h> | ||
| 11 | #endif | 9 | #endif |
| 12 | 10 | ||
| 11 | #include "bus_numa.h" | ||
| 12 | |||
| 13 | /* | 13 | /* |
| 14 | * This discovers the pcibus <-> node mapping on AMD K8. | 14 | * This discovers the pcibus <-> node mapping on AMD K8. |
| 15 | * also get peer root bus resource for io,mmio | 15 | * also get peer root bus resource for io,mmio |
| @@ -17,67 +17,6 @@ | |||
| 17 | 17 | ||
| 18 | #ifdef CONFIG_X86_64 | 18 | #ifdef CONFIG_X86_64 |
| 19 | 19 | ||
| 20 | /* | ||
| 21 | * sub bus (transparent) will use entres from 3 to store extra from root, | ||
| 22 | * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES? | ||
| 23 | */ | ||
| 24 | #define RES_NUM 16 | ||
| 25 | struct pci_root_info { | ||
| 26 | char name[12]; | ||
| 27 | unsigned int res_num; | ||
| 28 | struct resource res[RES_NUM]; | ||
| 29 | int bus_min; | ||
| 30 | int bus_max; | ||
| 31 | int node; | ||
| 32 | int link; | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* 4 at this time, it may become to 32 */ | ||
| 36 | #define PCI_ROOT_NR 4 | ||
| 37 | static int pci_root_num; | ||
| 38 | static struct pci_root_info pci_root_info[PCI_ROOT_NR]; | ||
| 39 | |||
| 40 | void x86_pci_root_bus_res_quirks(struct pci_bus *b) | ||
| 41 | { | ||
| 42 | int i; | ||
| 43 | int j; | ||
| 44 | struct pci_root_info *info; | ||
| 45 | |||
| 46 | /* don't go for it if _CRS is used already */ | ||
| 47 | if (b->resource[0] != &ioport_resource || | ||
| 48 | b->resource[1] != &iomem_resource) | ||
| 49 | return; | ||
| 50 | |||
| 51 | /* if only one root bus, don't need to anything */ | ||
| 52 | if (pci_root_num < 2) | ||
| 53 | return; | ||
| 54 | |||
| 55 | for (i = 0; i < pci_root_num; i++) { | ||
| 56 | if (pci_root_info[i].bus_min == b->number) | ||
| 57 | break; | ||
| 58 | } | ||
| 59 | |||
| 60 | if (i == pci_root_num) | ||
| 61 | return; | ||
| 62 | |||
| 63 | printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n", | ||
| 64 | b->number); | ||
| 65 | |||
| 66 | info = &pci_root_info[i]; | ||
| 67 | for (j = 0; j < info->res_num; j++) { | ||
| 68 | struct resource *res; | ||
| 69 | struct resource *root; | ||
| 70 | |||
| 71 | res = &info->res[j]; | ||
| 72 | b->resource[j] = res; | ||
| 73 | if (res->flags & IORESOURCE_IO) | ||
| 74 | root = &ioport_resource; | ||
| 75 | else | ||
| 76 | root = &iomem_resource; | ||
| 77 | insert_resource(root, res); | ||
| 78 | } | ||
| 79 | } | ||
| 80 | |||
| 81 | #define RANGE_NUM 16 | 20 | #define RANGE_NUM 16 |
| 82 | 21 | ||
| 83 | struct res_range { | 22 | struct res_range { |
| @@ -130,52 +69,6 @@ static void __init update_range(struct res_range *range, size_t start, | |||
| 130 | } | 69 | } |
| 131 | } | 70 | } |
| 132 | 71 | ||
| 133 | static void __init update_res(struct pci_root_info *info, size_t start, | ||
| 134 | size_t end, unsigned long flags, int merge) | ||
| 135 | { | ||
| 136 | int i; | ||
| 137 | struct resource *res; | ||
| 138 | |||
| 139 | if (!merge) | ||
| 140 | goto addit; | ||
| 141 | |||
| 142 | /* try to merge it with old one */ | ||
| 143 | for (i = 0; i < info->res_num; i++) { | ||
| 144 | size_t final_start, final_end; | ||
| 145 | size_t common_start, common_end; | ||
| 146 | |||
| 147 | res = &info->res[i]; | ||
| 148 | if (res->flags != flags) | ||
| 149 | continue; | ||
| 150 | |||
| 151 | common_start = max((size_t)res->start, start); | ||
| 152 | common_end = min((size_t)res->end, end); | ||
| 153 | if (common_start > common_end + 1) | ||
| 154 | continue; | ||
| 155 | |||
| 156 | final_start = min((size_t)res->start, start); | ||
| 157 | final_end = max((size_t)res->end, end); | ||
| 158 | |||
| 159 | res->start = final_start; | ||
| 160 | res->end = final_end; | ||
| 161 | return; | ||
| 162 | } | ||
| 163 | |||
| 164 | addit: | ||
| 165 | |||
| 166 | /* need to add that */ | ||
| 167 | if (info->res_num >= RES_NUM) | ||
| 168 | return; | ||
| 169 | |||
| 170 | res = &info->res[info->res_num]; | ||
| 171 | res->name = info->name; | ||
| 172 | res->flags = flags; | ||
| 173 | res->start = start; | ||
| 174 | res->end = end; | ||
| 175 | res->child = NULL; | ||
| 176 | info->res_num++; | ||
| 177 | } | ||
| 178 | |||
| 179 | struct pci_hostbridge_probe { | 72 | struct pci_hostbridge_probe { |
| 180 | u32 bus; | 73 | u32 bus; |
| 181 | u32 slot; | 74 | u32 slot; |
| @@ -230,7 +123,6 @@ static int __init early_fill_mp_bus_info(void) | |||
| 230 | int j; | 123 | int j; |
| 231 | unsigned bus; | 124 | unsigned bus; |
| 232 | unsigned slot; | 125 | unsigned slot; |
| 233 | int found; | ||
| 234 | int node; | 126 | int node; |
| 235 | int link; | 127 | int link; |
| 236 | int def_node; | 128 | int def_node; |
| @@ -247,7 +139,7 @@ static int __init early_fill_mp_bus_info(void) | |||
| 247 | if (!early_pci_allowed()) | 139 | if (!early_pci_allowed()) |
| 248 | return -1; | 140 | return -1; |
| 249 | 141 | ||
| 250 | found = 0; | 142 | found_all_numa_early = 0; |
| 251 | for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { | 143 | for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { |
| 252 | u32 id; | 144 | u32 id; |
| 253 | u16 device; | 145 | u16 device; |
| @@ -261,12 +153,12 @@ static int __init early_fill_mp_bus_info(void) | |||
| 261 | device = (id>>16) & 0xffff; | 153 | device = (id>>16) & 0xffff; |
| 262 | if (pci_probes[i].vendor == vendor && | 154 | if (pci_probes[i].vendor == vendor && |
| 263 | pci_probes[i].device == device) { | 155 | pci_probes[i].device == device) { |
| 264 | found = 1; | 156 | found_all_numa_early = 1; |
| 265 | break; | 157 | break; |
| 266 | } | 158 | } |
| 267 | } | 159 | } |
| 268 | 160 | ||
| 269 | if (!found) | 161 | if (!found_all_numa_early) |
| 270 | return 0; | 162 | return 0; |
| 271 | 163 | ||
| 272 | pci_root_num = 0; | 164 | pci_root_num = 0; |
| @@ -488,7 +380,7 @@ static int __init early_fill_mp_bus_info(void) | |||
| 488 | info = &pci_root_info[i]; | 380 | info = &pci_root_info[i]; |
| 489 | res_num = info->res_num; | 381 | res_num = info->res_num; |
| 490 | busnum = info->bus_min; | 382 | busnum = info->bus_min; |
| 491 | printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n", | 383 | printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n", |
| 492 | info->bus_min, info->bus_max, info->node, info->link); | 384 | info->bus_min, info->bus_max, info->node, info->link); |
| 493 | for (j = 0; j < res_num; j++) { | 385 | for (j = 0; j < res_num; j++) { |
| 494 | res = &info->res[j]; | 386 | res = &info->res[j]; |
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c new file mode 100644 index 000000000000..145df00e0387 --- /dev/null +++ b/arch/x86/pci/bus_numa.c | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/pci.h> | ||
| 3 | |||
| 4 | #include "bus_numa.h" | ||
| 5 | |||
| 6 | int pci_root_num; | ||
| 7 | struct pci_root_info pci_root_info[PCI_ROOT_NR]; | ||
| 8 | int found_all_numa_early; | ||
| 9 | |||
| 10 | void x86_pci_root_bus_res_quirks(struct pci_bus *b) | ||
| 11 | { | ||
| 12 | int i; | ||
| 13 | int j; | ||
| 14 | struct pci_root_info *info; | ||
| 15 | |||
| 16 | /* don't go for it if _CRS is used already */ | ||
| 17 | if (b->resource[0] != &ioport_resource || | ||
| 18 | b->resource[1] != &iomem_resource) | ||
| 19 | return; | ||
| 20 | |||
| 21 | if (!pci_root_num) | ||
| 22 | return; | ||
| 23 | |||
| 24 | /* for AMD, if there is only one root bus, nothing needs to be done */ | ||
| 25 | if (pci_root_num < 2 && found_all_numa_early) | ||
| 26 | return; | ||
| 27 | |||
| 28 | for (i = 0; i < pci_root_num; i++) { | ||
| 29 | if (pci_root_info[i].bus_min == b->number) | ||
| 30 | break; | ||
| 31 | } | ||
| 32 | |||
| 33 | if (i == pci_root_num) | ||
| 34 | return; | ||
| 35 | |||
| 36 | printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n", | ||
| 37 | b->number); | ||
| 38 | |||
| 39 | info = &pci_root_info[i]; | ||
| 40 | for (j = 0; j < info->res_num; j++) { | ||
| 41 | struct resource *res; | ||
| 42 | struct resource *root; | ||
| 43 | |||
| 44 | res = &info->res[j]; | ||
| 45 | b->resource[j] = res; | ||
| 46 | if (res->flags & IORESOURCE_IO) | ||
| 47 | root = &ioport_resource; | ||
| 48 | else | ||
| 49 | root = &iomem_resource; | ||
| 50 | insert_resource(root, res); | ||
| 51 | } | ||
| 52 | } | ||
| 53 | |||
| 54 | void __init update_res(struct pci_root_info *info, size_t start, | ||
| 55 | size_t end, unsigned long flags, int merge) | ||
| 56 | { | ||
| 57 | int i; | ||
| 58 | struct resource *res; | ||
| 59 | |||
| 60 | if (start > end) | ||
| 61 | return; | ||
| 62 | |||
| 63 | if (!merge) | ||
| 64 | goto addit; | ||
| 65 | |||
| 66 | /* try to merge it with old one */ | ||
| 67 | for (i = 0; i < info->res_num; i++) { | ||
| 68 | size_t final_start, final_end; | ||
| 69 | size_t common_start, common_end; | ||
| 70 | |||
| 71 | res = &info->res[i]; | ||
| 72 | if (res->flags != flags) | ||
| 73 | continue; | ||
| 74 | |||
| 75 | common_start = max((size_t)res->start, start); | ||
| 76 | common_end = min((size_t)res->end, end); | ||
| 77 | if (common_start > common_end + 1) | ||
| 78 | continue; | ||
| 79 | |||
| 80 | final_start = min((size_t)res->start, start); | ||
| 81 | final_end = max((size_t)res->end, end); | ||
| 82 | |||
| 83 | res->start = final_start; | ||
| 84 | res->end = final_end; | ||
| 85 | return; | ||
| 86 | } | ||
| 87 | |||
| 88 | addit: | ||
| 89 | |||
| 90 | /* need to add that */ | ||
| 91 | if (info->res_num >= RES_NUM) | ||
| 92 | return; | ||
| 93 | |||
| 94 | res = &info->res[info->res_num]; | ||
| 95 | res->name = info->name; | ||
| 96 | res->flags = flags; | ||
| 97 | res->start = start; | ||
| 98 | res->end = end; | ||
| 99 | res->child = NULL; | ||
| 100 | info->res_num++; | ||
| 101 | } | ||
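update_res() folds a new range into an existing resource with the same flags whenever the two overlap or merely abut; only a real gap keeps them apart. The disjointness test reads backwards at first glance, so here is a standalone model of it:

    #include <stdio.h>

    static int mergeable(size_t s1, size_t e1, size_t s2, size_t e2)
    {
            size_t common_start = s1 > s2 ? s1 : s2;
            size_t common_end   = e1 < e2 ? e1 : e2;
            /* truly disjoint only when there is a gap:
             * [0,9] vs [11,20] gives 11 > 9 + 1 */
            return !(common_start > common_end + 1);
    }

    int main(void)
    {
            printf("%d\n", mergeable(0, 9, 10, 20)); /* 1: adjacent, merges */
            printf("%d\n", mergeable(0, 9, 11, 20)); /* 0: one-unit gap */
            printf("%d\n", mergeable(0, 15, 8, 20)); /* 1: overlapping */
            return 0;
    }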
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h new file mode 100644 index 000000000000..adbc23fe82ac --- /dev/null +++ b/arch/x86/pci/bus_numa.h | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | #ifdef CONFIG_X86_64 | ||
| 2 | |||
| 3 | /* | ||
| 4 | * A transparent sub bus will use entries from 3 onward to store | ||
| 5 | * extras from the root, so make sure there are enough slots. Should | ||
| 6 | * we increase PCI_BUS_NUM_RESOURCES? | ||
| 7 | */ | ||
| 8 | #define RES_NUM 16 | ||
| 9 | struct pci_root_info { | ||
| 10 | char name[12]; | ||
| 11 | unsigned int res_num; | ||
| 12 | struct resource res[RES_NUM]; | ||
| 13 | int bus_min; | ||
| 14 | int bus_max; | ||
| 15 | int node; | ||
| 16 | int link; | ||
| 17 | }; | ||
| 18 | |||
| 19 | /* 4 at this time; it may grow to 32 */ | ||
| 20 | #define PCI_ROOT_NR 4 | ||
| 21 | extern int pci_root_num; | ||
| 22 | extern struct pci_root_info pci_root_info[PCI_ROOT_NR]; | ||
| 23 | extern int found_all_numa_early; | ||
| 24 | |||
| 25 | extern void update_res(struct pci_root_info *info, size_t start, | ||
| 26 | size_t end, unsigned long flags, int merge); | ||
| 27 | #endif | ||
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 1331fcf26143..d2552c68e94d 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
| @@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum) | |||
| 410 | return bus; | 410 | return bus; |
| 411 | } | 411 | } |
| 412 | 412 | ||
| 413 | extern u8 pci_cache_line_size; | ||
| 414 | |||
| 415 | int __init pcibios_init(void) | 413 | int __init pcibios_init(void) |
| 416 | { | 414 | { |
| 417 | struct cpuinfo_x86 *c = &boot_cpu_data; | 415 | struct cpuinfo_x86 *c = &boot_cpu_data; |
| @@ -422,15 +420,19 @@ int __init pcibios_init(void) | |||
| 422 | } | 420 | } |
| 423 | 421 | ||
| 424 | /* | 422 | /* |
| 425 | * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8 | 423 | * Set PCI cacheline size to that of the CPU if the CPU has reported it. |
| 426 | * and P4. It's also good for 386/486s (which actually have 16) | 424 | * (For older CPUs that don't support cpuid, we set it to 32 bytes.) |
| 425 | * It's also good for 386/486s (which actually have 16) | ||
| 427 | * as quite a few PCI devices do not support smaller values. | 426 | * as quite a few PCI devices do not support smaller values. |
| 428 | */ | 427 | */ |
| 429 | pci_cache_line_size = 32 >> 2; | 428 | if (c->x86_clflush_size > 0) { |
| 430 | if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD) | 429 | pci_dfl_cache_line_size = c->x86_clflush_size >> 2; |
| 431 | pci_cache_line_size = 64 >> 2; /* K7 & K8 */ | 430 | printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n", |
| 432 | else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL) | 431 | pci_dfl_cache_line_size << 2); |
| 433 | pci_cache_line_size = 128 >> 2; /* P4 */ | 432 | } else { |
| 433 | pci_dfl_cache_line_size = 32 >> 2; | ||
| 434 | printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n"); | ||
| 435 | } | ||
| 434 | 436 | ||
| 435 | pcibios_resource_survey(); | 437 | pcibios_resource_survey(); |
| 436 | 438 | ||
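pci_dfl_cache_line_size is kept in 32-bit units because the PCI_CACHE_LINE_SIZE register counts dwords, hence the >> 2 applied to the CLFLUSH size (which CPUID reports in bytes). A rough sketch of the derivation, assuming the cpuinfo_x86 field used above:

    #include <linux/types.h>
    #include <asm/processor.h>

    static u8 cacheline_dwords(const struct cpuinfo_x86 *c)
    {
            if (c->x86_clflush_size > 0)       /* CPUID-reported, in bytes */
                    return c->x86_clflush_size >> 2;
            return 32 >> 2;                    /* pre-CPUID fallback */
    }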
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index aaf26ae58cd5..d1067d539bee 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c | |||
| @@ -12,8 +12,6 @@ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) | |||
| 12 | u32 v; | 12 | u32 v; |
| 13 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 13 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 14 | v = inl(0xcfc); | 14 | v = inl(0xcfc); |
| 15 | if (v != 0xffffffff) | ||
| 16 | pr_debug("%x reading 4 from %x: %x\n", slot, offset, v); | ||
| 17 | return v; | 15 | return v; |
| 18 | } | 16 | } |
| 19 | 17 | ||
| @@ -22,7 +20,6 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) | |||
| 22 | u8 v; | 20 | u8 v; |
| 23 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 21 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 24 | v = inb(0xcfc + (offset&3)); | 22 | v = inb(0xcfc + (offset&3)); |
| 25 | pr_debug("%x reading 1 from %x: %x\n", slot, offset, v); | ||
| 26 | return v; | 23 | return v; |
| 27 | } | 24 | } |
| 28 | 25 | ||
| @@ -31,28 +28,24 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) | |||
| 31 | u16 v; | 28 | u16 v; |
| 32 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 29 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 33 | v = inw(0xcfc + (offset&2)); | 30 | v = inw(0xcfc + (offset&2)); |
| 34 | pr_debug("%x reading 2 from %x: %x\n", slot, offset, v); | ||
| 35 | return v; | 31 | return v; |
| 36 | } | 32 | } |
| 37 | 33 | ||
| 38 | void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, | 34 | void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, |
| 39 | u32 val) | 35 | u32 val) |
| 40 | { | 36 | { |
| 41 | pr_debug("%x writing to %x: %x\n", slot, offset, val); | ||
| 42 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 37 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 43 | outl(val, 0xcfc); | 38 | outl(val, 0xcfc); |
| 44 | } | 39 | } |
| 45 | 40 | ||
| 46 | void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val) | 41 | void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val) |
| 47 | { | 42 | { |
| 48 | pr_debug("%x writing to %x: %x\n", slot, offset, val); | ||
| 49 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 43 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 50 | outb(val, 0xcfc + (offset&3)); | 44 | outb(val, 0xcfc + (offset&3)); |
| 51 | } | 45 | } |
| 52 | 46 | ||
| 53 | void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val) | 47 | void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val) |
| 54 | { | 48 | { |
| 55 | pr_debug("%x writing to %x: %x\n", slot, offset, val); | ||
| 56 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); | 49 | outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); |
| 57 | outw(val, 0xcfc + (offset&2)); | 50 | outw(val, 0xcfc + (offset&2)); |
| 58 | } | 51 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index b22d13b0c71d..5dc9e8c63fcd 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
| @@ -129,7 +129,9 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
| 129 | continue; | 129 | continue; |
| 130 | if (!r->start || | 130 | if (!r->start || |
| 131 | pci_claim_resource(dev, idx) < 0) { | 131 | pci_claim_resource(dev, idx) < 0) { |
| 132 | dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); | 132 | dev_info(&dev->dev, |
| 133 | "can't reserve window %pR\n", | ||
| 134 | r); | ||
| 133 | /* | 135 | /* |
| 134 | * Something is wrong with the region. | 136 | * Something is wrong with the region. |
| 135 | * Invalidate the resource to prevent | 137 | * Invalidate the resource to prevent |
| @@ -144,16 +146,29 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
| 144 | } | 146 | } |
| 145 | } | 147 | } |
| 146 | 148 | ||
| 149 | struct pci_check_idx_range { | ||
| 150 | int start; | ||
| 151 | int end; | ||
| 152 | }; | ||
| 153 | |||
| 147 | static void __init pcibios_allocate_resources(int pass) | 154 | static void __init pcibios_allocate_resources(int pass) |
| 148 | { | 155 | { |
| 149 | struct pci_dev *dev = NULL; | 156 | struct pci_dev *dev = NULL; |
| 150 | int idx, disabled; | 157 | int idx, disabled, i; |
| 151 | u16 command; | 158 | u16 command; |
| 152 | struct resource *r; | 159 | struct resource *r; |
| 153 | 160 | ||
| 161 | struct pci_check_idx_range idx_range[] = { | ||
| 162 | { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END }, | ||
| 163 | #ifdef CONFIG_PCI_IOV | ||
| 164 | { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END }, | ||
| 165 | #endif | ||
| 166 | }; | ||
| 167 | |||
| 154 | for_each_pci_dev(dev) { | 168 | for_each_pci_dev(dev) { |
| 155 | pci_read_config_word(dev, PCI_COMMAND, &command); | 169 | pci_read_config_word(dev, PCI_COMMAND, &command); |
| 156 | for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) { | 170 | for (i = 0; i < ARRAY_SIZE(idx_range); i++) |
| 171 | for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) { | ||
| 157 | r = &dev->resource[idx]; | 172 | r = &dev->resource[idx]; |
| 158 | if (r->parent) /* Already allocated */ | 173 | if (r->parent) /* Already allocated */ |
| 159 | continue; | 174 | continue; |
| @@ -164,12 +179,12 @@ static void __init pcibios_allocate_resources(int pass) | |||
| 164 | else | 179 | else |
| 165 | disabled = !(command & PCI_COMMAND_MEMORY); | 180 | disabled = !(command & PCI_COMMAND_MEMORY); |
| 166 | if (pass == disabled) { | 181 | if (pass == disabled) { |
| 167 | dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n", | 182 | dev_dbg(&dev->dev, |
| 168 | (unsigned long long) r->start, | 183 | "BAR %d: reserving %pr (d=%d, p=%d)\n", |
| 169 | (unsigned long long) r->end, | 184 | idx, r, disabled, pass); |
| 170 | r->flags, disabled, pass); | ||
| 171 | if (pci_claim_resource(dev, idx) < 0) { | 185 | if (pci_claim_resource(dev, idx) < 0) { |
| 172 | dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); | 186 | dev_info(&dev->dev, |
| 187 | "can't reserve %pR\n", r); | ||
| 173 | /* We'll assign a new address later */ | 188 | /* We'll assign a new address later */ |
| 174 | r->end -= r->start; | 189 | r->end -= r->start; |
| 175 | r->start = 0; | 190 | r->start = 0; |
| @@ -182,7 +197,7 @@ static void __init pcibios_allocate_resources(int pass) | |||
| 182 | /* Turn the ROM off, leave the resource region, | 197 | /* Turn the ROM off, leave the resource region, |
| 183 | * but keep it unregistered. */ | 198 | * but keep it unregistered. */ |
| 184 | u32 reg; | 199 | u32 reg; |
| 185 | dev_dbg(&dev->dev, "disabling ROM\n"); | 200 | dev_dbg(&dev->dev, "disabling ROM %pR\n", r); |
| 186 | r->flags &= ~IORESOURCE_ROM_ENABLE; | 201 | r->flags &= ~IORESOURCE_ROM_ENABLE; |
| 187 | pci_read_config_dword(dev, | 202 | pci_read_config_dword(dev, |
| 188 | dev->rom_base_reg, ®); | 203 | dev->rom_base_reg, ®); |
| @@ -282,6 +297,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
| 282 | return -EINVAL; | 297 | return -EINVAL; |
| 283 | 298 | ||
| 284 | prot = pgprot_val(vma->vm_page_prot); | 299 | prot = pgprot_val(vma->vm_page_prot); |
| 300 | |||
| 301 | /* | ||
| 302 | * Return error if pat is not enabled and write_combine is requested. | ||
| 303 | * Caller can followup with UC MINUS request and add a WC mtrr if there | ||
| 304 | * is a free mtrr slot. | ||
| 305 | */ | ||
| 306 | if (!pat_enabled && write_combine) | ||
| 307 | return -EINVAL; | ||
| 308 | |||
| 285 | if (pat_enabled && write_combine) | 309 | if (pat_enabled && write_combine) |
| 286 | prot |= _PAGE_CACHE_WC; | 310 | prot |= _PAGE_CACHE_WC; |
| 287 | else if (pat_enabled || boot_cpu_data.x86 > 3) | 311 | else if (pat_enabled || boot_cpu_data.x86 > 3) |
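Several hunks above switch the messages to the %pR printk extension, which renders a struct resource as a typed range instead of hand-formatted start/end values. A hypothetical example (exact output decoration varies by kernel version):

    #include <linux/ioport.h>
    #include <linux/kernel.h>

    static void show_window(void)
    {
            struct resource r = {
                    .start = 0xfebf0000,
                    .end   = 0xfebfffff,
                    .flags = IORESOURCE_MEM,
            };

            /* prints roughly: "window [mem 0xfebf0000-0xfebfffff]" */
            printk(KERN_DEBUG "window %pR\n", &r);
    }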
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c new file mode 100644 index 000000000000..b7a55dc55d13 --- /dev/null +++ b/arch/x86/pci/intel_bus.c | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | /* | ||
| 2 | * Read the IO ranges from the IOH PCI config space; this must be done after mmconfig is available | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/delay.h> | ||
| 6 | #include <linux/dmi.h> | ||
| 7 | #include <linux/pci.h> | ||
| 8 | #include <linux/init.h> | ||
| 9 | #include <asm/pci_x86.h> | ||
| 10 | |||
| 11 | #include "bus_numa.h" | ||
| 12 | |||
| 13 | static inline void print_ioh_resources(struct pci_root_info *info) | ||
| 14 | { | ||
| 15 | int res_num; | ||
| 16 | int busnum; | ||
| 17 | int i; | ||
| 18 | |||
| 19 | printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n", | ||
| 20 | info->bus_min, info->bus_max); | ||
| 21 | res_num = info->res_num; | ||
| 22 | busnum = info->bus_min; | ||
| 23 | for (i = 0; i < res_num; i++) { | ||
| 24 | struct resource *res; | ||
| 25 | |||
| 26 | res = &info->res[i]; | ||
| 27 | printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n", | ||
| 28 | busnum, i, | ||
| 29 | (res->flags & IORESOURCE_IO) ? "io port" : | ||
| 30 | "mmio", | ||
| 31 | res->start, res->end); | ||
| 32 | } | ||
| 33 | } | ||
| 34 | |||
| 35 | #define IOH_LIO 0x108 | ||
| 36 | #define IOH_LMMIOL 0x10c | ||
| 37 | #define IOH_LMMIOH 0x110 | ||
| 38 | #define IOH_LMMIOH_BASEU 0x114 | ||
| 39 | #define IOH_LMMIOH_LIMITU 0x118 | ||
| 40 | #define IOH_LCFGBUS 0x11c | ||
| 41 | |||
| 42 | static void __devinit pci_root_bus_res(struct pci_dev *dev) | ||
| 43 | { | ||
| 44 | u16 word; | ||
| 45 | u32 dword; | ||
| 46 | struct pci_root_info *info; | ||
| 47 | u16 io_base, io_end; | ||
| 48 | u32 mmiol_base, mmiol_end; | ||
| 49 | u64 mmioh_base, mmioh_end; | ||
| 50 | int bus_base, bus_end; | ||
| 51 | |||
| 52 | if (pci_root_num >= PCI_ROOT_NR) { | ||
| 53 | printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n"); | ||
| 54 | return; | ||
| 55 | } | ||
| 56 | |||
| 57 | info = &pci_root_info[pci_root_num]; | ||
| 58 | pci_root_num++; | ||
| 59 | |||
| 60 | pci_read_config_word(dev, IOH_LCFGBUS, &word); | ||
| 61 | bus_base = (word & 0xff); | ||
| 62 | bus_end = (word & 0xff00) >> 8; | ||
| 63 | sprintf(info->name, "PCI Bus #%02x", bus_base); | ||
| 64 | info->bus_min = bus_base; | ||
| 65 | info->bus_max = bus_end; | ||
| 66 | |||
| 67 | pci_read_config_word(dev, IOH_LIO, &word); | ||
| 68 | io_base = (word & 0xf0) << (12 - 4); | ||
| 69 | io_end = (word & 0xf000) | 0xfff; | ||
| 70 | update_res(info, io_base, io_end, IORESOURCE_IO, 0); | ||
| 71 | |||
| 72 | pci_read_config_dword(dev, IOH_LMMIOL, &dword); | ||
| 73 | mmiol_base = (dword & 0xff00) << (24 - 8); | ||
| 74 | mmiol_end = (dword & 0xff000000) | 0xffffff; | ||
| 75 | update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0); | ||
| 76 | |||
| 77 | pci_read_config_dword(dev, IOH_LMMIOH, &dword); | ||
| 78 | mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10); | ||
| 79 | mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff); | ||
| 80 | pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword); | ||
| 81 | mmioh_base |= ((u64)(dword & 0x7ffff)) << 32; | ||
| 82 | pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword); | ||
| 83 | mmioh_end |= ((u64)(dword & 0x7ffff)) << 32; | ||
| 84 | update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0); | ||
| 85 | |||
| 86 | print_ioh_resources(info); | ||
| 87 | } | ||
| 88 | |||
| 89 | /* intel IOH */ | ||
| 90 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res); | ||
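pci_root_bus_res() reassembles base/limit pairs from packed IOH registers: a few high-order bits of the register give the top bits of the base, and the limit is completed by filling its low bits with ones. A standalone worked example of the IOH_LMMIOL decode with a made-up register value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int dword = 0xdf00c100;  /* hypothetical LMMIOL value */

            /* bits 15:8 carry base[31:24]; shift left by 24 - 8 = 16 */
            unsigned int mmiol_base = (dword & 0xff00) << (24 - 8);
            /* bits 31:24 carry the limit's top byte; low 24 bits read as 1s */
            unsigned int mmiol_end  = (dword & 0xff000000) | 0xffffff;

            printf("MMIOL [%#x-%#x]\n", mmiol_base, mmiol_end);
            /* -> MMIOL [0xc1000000-0xdfffffff] */
            return 0;
    }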
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 602c172d3bd5..b19d1e54201e 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
| @@ -15,48 +15,98 @@ | |||
| 15 | #include <linux/acpi.h> | 15 | #include <linux/acpi.h> |
| 16 | #include <linux/sfi_acpi.h> | 16 | #include <linux/sfi_acpi.h> |
| 17 | #include <linux/bitmap.h> | 17 | #include <linux/bitmap.h> |
| 18 | #include <linux/sort.h> | 18 | #include <linux/dmi.h> |
| 19 | #include <asm/e820.h> | 19 | #include <asm/e820.h> |
| 20 | #include <asm/pci_x86.h> | 20 | #include <asm/pci_x86.h> |
| 21 | #include <asm/acpi.h> | 21 | #include <asm/acpi.h> |
| 22 | 22 | ||
| 23 | #define PREFIX "PCI: " | 23 | #define PREFIX "PCI: " |
| 24 | 24 | ||
| 25 | /* aperture is up to 256MB but BIOS may reserve less */ | ||
| 26 | #define MMCONFIG_APER_MIN (2 * 1024*1024) | ||
| 27 | #define MMCONFIG_APER_MAX (256 * 1024*1024) | ||
| 28 | |||
| 29 | /* Indicate if the mmcfg resources have been placed into the resource table. */ | 25 | /* Indicate if the mmcfg resources have been placed into the resource table. */ |
| 30 | static int __initdata pci_mmcfg_resources_inserted; | 26 | static int __initdata pci_mmcfg_resources_inserted; |
| 31 | 27 | ||
| 32 | static __init int extend_mmcfg(int num) | 28 | LIST_HEAD(pci_mmcfg_list); |
| 29 | |||
| 30 | static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg) | ||
| 33 | { | 31 | { |
| 34 | struct acpi_mcfg_allocation *new; | 32 | if (cfg->res.parent) |
| 35 | int new_num = pci_mmcfg_config_num + num; | 33 | release_resource(&cfg->res); |
| 34 | list_del(&cfg->list); | ||
| 35 | kfree(cfg); | ||
| 36 | } | ||
| 36 | 37 | ||
| 37 | new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL); | 38 | static __init void free_all_mmcfg(void) |
| 38 | if (!new) | 39 | { |
| 39 | return -1; | 40 | struct pci_mmcfg_region *cfg, *tmp; |
| 40 | 41 | ||
| 41 | if (pci_mmcfg_config) { | 42 | pci_mmcfg_arch_free(); |
| 42 | memcpy(new, pci_mmcfg_config, | 43 | list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) |
| 43 | sizeof(pci_mmcfg_config[0]) * new_num); | 44 | pci_mmconfig_remove(cfg); |
| 44 | kfree(pci_mmcfg_config); | 45 | } |
| 46 | |||
| 47 | static __init void list_add_sorted(struct pci_mmcfg_region *new) | ||
| 48 | { | ||
| 49 | struct pci_mmcfg_region *cfg; | ||
| 50 | |||
| 51 | /* keep list sorted by segment and starting bus number */ | ||
| 52 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { | ||
| 53 | if (cfg->segment > new->segment || | ||
| 54 | (cfg->segment == new->segment && | ||
| 55 | cfg->start_bus >= new->start_bus)) { | ||
| 56 | list_add_tail(&new->list, &cfg->list); | ||
| 57 | return; | ||
| 58 | } | ||
| 45 | } | 59 | } |
| 46 | pci_mmcfg_config = new; | 60 | list_add_tail(&new->list, &pci_mmcfg_list); |
| 61 | } | ||
| 47 | 62 | ||
| 48 | return 0; | 63 | static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start, |
| 64 | int end, u64 addr) | ||
| 65 | { | ||
| 66 | struct pci_mmcfg_region *new; | ||
| 67 | int num_buses; | ||
| 68 | struct resource *res; | ||
| 69 | |||
| 70 | if (addr == 0) | ||
| 71 | return NULL; | ||
| 72 | |||
| 73 | new = kzalloc(sizeof(*new), GFP_KERNEL); | ||
| 74 | if (!new) | ||
| 75 | return NULL; | ||
| 76 | |||
| 77 | new->address = addr; | ||
| 78 | new->segment = segment; | ||
| 79 | new->start_bus = start; | ||
| 80 | new->end_bus = end; | ||
| 81 | |||
| 82 | list_add_sorted(new); | ||
| 83 | |||
| 84 | num_buses = end - start + 1; | ||
| 85 | res = &new->res; | ||
| 86 | res->start = addr + PCI_MMCFG_BUS_OFFSET(start); | ||
| 87 | res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1; | ||
| 88 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
| 89 | snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, | ||
| 90 | "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end); | ||
| 91 | res->name = new->name; | ||
| 92 | |||
| 93 | printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at " | ||
| 94 | "%pR (base %#lx)\n", segment, start, end, &new->res, | ||
| 95 | (unsigned long) addr); | ||
| 96 | |||
| 97 | return new; | ||
| 49 | } | 98 | } |
| 50 | 99 | ||
| 51 | static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end) | 100 | struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus) |
| 52 | { | 101 | { |
| 53 | int i = pci_mmcfg_config_num; | 102 | struct pci_mmcfg_region *cfg; |
| 54 | 103 | ||
| 55 | pci_mmcfg_config_num++; | 104 | list_for_each_entry(cfg, &pci_mmcfg_list, list) |
| 56 | pci_mmcfg_config[i].address = addr; | 105 | if (cfg->segment == segment && |
| 57 | pci_mmcfg_config[i].pci_segment = segment; | 106 | cfg->start_bus <= bus && bus <= cfg->end_bus) |
| 58 | pci_mmcfg_config[i].start_bus_number = start; | 107 | return cfg; |
| 59 | pci_mmcfg_config[i].end_bus_number = end; | 108 | |
| 109 | return NULL; | ||
| 60 | } | 110 | } |
| 61 | 111 | ||
| 62 | static const char __init *pci_mmcfg_e7520(void) | 112 | static const char __init *pci_mmcfg_e7520(void) |
| @@ -68,11 +118,9 @@ static const char __init *pci_mmcfg_e7520(void) | |||
| 68 | if (win == 0x0000 || win == 0xf000) | 118 | if (win == 0x0000 || win == 0xf000) |
| 69 | return NULL; | 119 | return NULL; |
| 70 | 120 | ||
| 71 | if (extend_mmcfg(1) == -1) | 121 | if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL) |
| 72 | return NULL; | 122 | return NULL; |
| 73 | 123 | ||
| 74 | fill_one_mmcfg(win << 16, 0, 0, 255); | ||
| 75 | |||
| 76 | return "Intel Corporation E7520 Memory Controller Hub"; | 124 | return "Intel Corporation E7520 Memory Controller Hub"; |
| 77 | } | 125 | } |
| 78 | 126 | ||
| @@ -114,11 +162,9 @@ static const char __init *pci_mmcfg_intel_945(void) | |||
| 114 | if ((pciexbar & mask) >= 0xf0000000U) | 162 | if ((pciexbar & mask) >= 0xf0000000U) |
| 115 | return NULL; | 163 | return NULL; |
| 116 | 164 | ||
| 117 | if (extend_mmcfg(1) == -1) | 165 | if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL) |
| 118 | return NULL; | 166 | return NULL; |
| 119 | 167 | ||
| 120 | fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1); | ||
| 121 | |||
| 122 | return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; | 168 | return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; |
| 123 | } | 169 | } |
| 124 | 170 | ||
| @@ -127,7 +173,7 @@ static const char __init *pci_mmcfg_amd_fam10h(void) | |||
| 127 | u32 low, high, address; | 173 | u32 low, high, address; |
| 128 | u64 base, msr; | 174 | u64 base, msr; |
| 129 | int i; | 175 | int i; |
| 130 | unsigned segnbits = 0, busnbits; | 176 | unsigned segnbits = 0, busnbits, end_bus; |
| 131 | 177 | ||
| 132 | if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) | 178 | if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) |
| 133 | return NULL; | 179 | return NULL; |
| @@ -161,11 +207,13 @@ static const char __init *pci_mmcfg_amd_fam10h(void) | |||
| 161 | busnbits = 8; | 207 | busnbits = 8; |
| 162 | } | 208 | } |
| 163 | 209 | ||
| 164 | if (extend_mmcfg(1 << segnbits) == -1) | 210 | end_bus = (1 << busnbits) - 1; |
| 165 | return NULL; | ||
| 166 | |||
| 167 | for (i = 0; i < (1 << segnbits); i++) | 211 | for (i = 0; i < (1 << segnbits); i++) |
| 168 | fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1); | 212 | if (pci_mmconfig_add(i, 0, end_bus, |
| 213 | base + (1<<28) * i) == NULL) { | ||
| 214 | free_all_mmcfg(); | ||
| 215 | return NULL; | ||
| 216 | } | ||
| 169 | 217 | ||
| 170 | return "AMD Family 10h NB"; | 218 | return "AMD Family 10h NB"; |
| 171 | } | 219 | } |
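[Editor's note] The (1<<28) segment stride in the loop above is the same 1 MiB-per-bus layout carried one level up: a full segment of 256 buses spans 256 MiB. A quick check of that arithmetic (a sketch of the math only, not derived from AMD documentation in this diff):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned busnbits = 8;                   /* 256 buses/segment */
            unsigned end_bus = (1u << busnbits) - 1; /* 255, as above */
            uint64_t segment_span = (uint64_t)(end_bus + 1) << 20;

            assert(end_bus == 255);
            assert(segment_span == (1u << 28)); /* matches base + (1<<28)*i */
            return 0;
    }

Note also the new failure path: a partial allocation now unwinds with free_all_mmcfg() instead of leaving earlier segments registered.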
| @@ -190,7 +238,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) | |||
| 190 | /* | 238 | /* |
| 191 | * do check if amd fam10h already took over | 239 | * do check if amd fam10h already took over |
| 192 | */ | 240 | */ |
| 193 | if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked) | 241 | if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked) |
| 194 | return NULL; | 242 | return NULL; |
| 195 | 243 | ||
| 196 | mcp55_checked = true; | 244 | mcp55_checked = true; |
| @@ -213,16 +261,14 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void) | |||
| 213 | if (!(extcfg & extcfg_enable_mask)) | 261 | if (!(extcfg & extcfg_enable_mask)) |
| 214 | continue; | 262 | continue; |
| 215 | 263 | ||
| 216 | if (extend_mmcfg(1) == -1) | ||
| 217 | continue; | ||
| 218 | |||
| 219 | size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; | 264 | size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift; |
| 220 | base = extcfg & extcfg_base_mask[size_index]; | 265 | base = extcfg & extcfg_base_mask[size_index]; |
| 221 | /* base could > 4G */ | 266 | /* base could > 4G */ |
| 222 | base <<= extcfg_base_lshift; | 267 | base <<= extcfg_base_lshift; |
| 223 | start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; | 268 | start = (extcfg & extcfg_start_mask) >> extcfg_start_shift; |
| 224 | end = start + extcfg_sizebus[size_index] - 1; | 269 | end = start + extcfg_sizebus[size_index] - 1; |
| 225 | fill_one_mmcfg(base, 0, start, end); | 270 | if (pci_mmconfig_add(0, start, end, base) == NULL) |
| 271 | continue; | ||
| 226 | mcp55_mmconf_found++; | 272 | mcp55_mmconf_found++; |
| 227 | } | 273 | } |
| 228 | 274 | ||
| @@ -253,45 +299,27 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { | |||
| 253 | 0x0369, pci_mmcfg_nvidia_mcp55 }, | 299 | 0x0369, pci_mmcfg_nvidia_mcp55 }, |
| 254 | }; | 300 | }; |
| 255 | 301 | ||
| 256 | static int __init cmp_mmcfg(const void *x1, const void *x2) | ||
| 257 | { | ||
| 258 | const typeof(pci_mmcfg_config[0]) *m1 = x1; | ||
| 259 | const typeof(pci_mmcfg_config[0]) *m2 = x2; | ||
| 260 | int start1, start2; | ||
| 261 | |||
| 262 | start1 = m1->start_bus_number; | ||
| 263 | start2 = m2->start_bus_number; | ||
| 264 | |||
| 265 | return start1 - start2; | ||
| 266 | } | ||
| 267 | |||
| 268 | static void __init pci_mmcfg_check_end_bus_number(void) | 302 | static void __init pci_mmcfg_check_end_bus_number(void) |
| 269 | { | 303 | { |
| 270 | int i; | 304 | struct pci_mmcfg_region *cfg, *cfgx; |
| 271 | typeof(pci_mmcfg_config[0]) *cfg, *cfgx; | ||
| 272 | |||
| 273 | /* sort them at first */ | ||
| 274 | sort(pci_mmcfg_config, pci_mmcfg_config_num, | ||
| 275 | sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL); | ||
| 276 | 305 | ||
| 277 | /* last one*/ | 306 | /* last one*/ |
| 278 | if (pci_mmcfg_config_num > 0) { | 307 | cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list); |
| 279 | i = pci_mmcfg_config_num - 1; | 308 | if (cfg) |
| 280 | cfg = &pci_mmcfg_config[i]; | 309 | if (cfg->end_bus < cfg->start_bus) |
| 281 | if (cfg->end_bus_number < cfg->start_bus_number) | 310 | cfg->end_bus = 255; |
| 282 | cfg->end_bus_number = 255; | ||
| 283 | } | ||
| 284 | 311 | ||
| 285 | /* don't overlap please */ | 312 | if (list_is_singular(&pci_mmcfg_list)) |
| 286 | for (i = 0; i < pci_mmcfg_config_num - 1; i++) { | 313 | return; |
| 287 | cfg = &pci_mmcfg_config[i]; | ||
| 288 | cfgx = &pci_mmcfg_config[i+1]; | ||
| 289 | 314 | ||
| 290 | if (cfg->end_bus_number < cfg->start_bus_number) | 315 | /* don't overlap please */ |
| 291 | cfg->end_bus_number = 255; | 316 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { |
| 317 | if (cfg->end_bus < cfg->start_bus) | ||
| 318 | cfg->end_bus = 255; | ||
| 292 | 319 | ||
| 293 | if (cfg->end_bus_number >= cfgx->start_bus_number) | 320 | cfgx = list_entry(cfg->list.next, typeof(*cfg), list); |
| 294 | cfg->end_bus_number = cfgx->start_bus_number - 1; | 321 | if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus) |
| 322 | cfg->end_bus = cfgx->start_bus - 1; | ||
| 295 | } | 323 | } |
| 296 | } | 324 | } |
| 297 | 325 | ||
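[Editor's note] The rewritten overlap pass only works because list_add_sorted() keeps pci_mmcfg_list ordered by start_bus; each region is clipped against its immediate successor. A self-contained sketch of the same fix-up over a sorted array (hypothetical data, standing in for the kernel list walk):

    #include <stdio.h>

    struct region { int start_bus, end_bus; };

    /* Mirrors pci_mmcfg_check_end_bus_number(): fix crazy end buses,
     * then clip each region at the next region's start bus. */
    static void trim_overlaps(struct region *r, int n)
    {
            for (int i = 0; i < n; i++) {
                    if (r[i].end_bus < r[i].start_bus)
                            r[i].end_bus = 255;
                    if (i + 1 < n && r[i].end_bus >= r[i + 1].start_bus)
                            r[i].end_bus = r[i + 1].start_bus - 1;
            }
    }

    int main(void)
    {
            struct region r[] = { { 0x00, 0x3f }, { 0x20, 0xff } };

            trim_overlaps(r, 2);
            /* Prints [00-1f] [20-ff]: the first window gives way */
            printf("[%02x-%02x] [%02x-%02x]\n", r[0].start_bus,
                   r[0].end_bus, r[1].start_bus, r[1].end_bus);
            return 0;
    }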
| @@ -306,8 +334,7 @@ static int __init pci_mmcfg_check_hostbridge(void) | |||
| 306 | if (!raw_pci_ops) | 334 | if (!raw_pci_ops) |
| 307 | return 0; | 335 | return 0; |
| 308 | 336 | ||
| 309 | pci_mmcfg_config_num = 0; | 337 | free_all_mmcfg(); |
| 310 | pci_mmcfg_config = NULL; | ||
| 311 | 338 | ||
| 312 | for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { | 339 | for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) { |
| 313 | bus = pci_mmcfg_probes[i].bus; | 340 | bus = pci_mmcfg_probes[i].bus; |
| @@ -322,45 +349,22 @@ static int __init pci_mmcfg_check_hostbridge(void) | |||
| 322 | name = pci_mmcfg_probes[i].probe(); | 349 | name = pci_mmcfg_probes[i].probe(); |
| 323 | 350 | ||
| 324 | if (name) | 351 | if (name) |
| 325 | printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n", | 352 | printk(KERN_INFO PREFIX "%s with MMCONFIG support\n", |
| 326 | name); | 353 | name); |
| 327 | } | 354 | } |
| 328 | 355 | ||
| 329 | /* some end_bus_number is crazy, fix it */ | 356 | /* some end_bus_number is crazy, fix it */ |
| 330 | pci_mmcfg_check_end_bus_number(); | 357 | pci_mmcfg_check_end_bus_number(); |
| 331 | 358 | ||
| 332 | return pci_mmcfg_config_num != 0; | 359 | return !list_empty(&pci_mmcfg_list); |
| 333 | } | 360 | } |
| 334 | 361 | ||
| 335 | static void __init pci_mmcfg_insert_resources(void) | 362 | static void __init pci_mmcfg_insert_resources(void) |
| 336 | { | 363 | { |
| 337 | #define PCI_MMCFG_RESOURCE_NAME_LEN 24 | 364 | struct pci_mmcfg_region *cfg; |
| 338 | int i; | ||
| 339 | struct resource *res; | ||
| 340 | char *names; | ||
| 341 | unsigned num_buses; | ||
| 342 | |||
| 343 | res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res), | ||
| 344 | pci_mmcfg_config_num, GFP_KERNEL); | ||
| 345 | if (!res) { | ||
| 346 | printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n"); | ||
| 347 | return; | ||
| 348 | } | ||
| 349 | 365 | ||
| 350 | names = (void *)&res[pci_mmcfg_config_num]; | 366 | list_for_each_entry(cfg, &pci_mmcfg_list, list) |
| 351 | for (i = 0; i < pci_mmcfg_config_num; i++, res++) { | 367 | insert_resource(&iomem_resource, &cfg->res); |
| 352 | struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i]; | ||
| 353 | num_buses = cfg->end_bus_number - cfg->start_bus_number + 1; | ||
| 354 | res->name = names; | ||
| 355 | snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, | ||
| 356 | "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment, | ||
| 357 | cfg->start_bus_number, cfg->end_bus_number); | ||
| 358 | res->start = cfg->address + (cfg->start_bus_number << 20); | ||
| 359 | res->end = res->start + (num_buses << 20) - 1; | ||
| 360 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
| 361 | insert_resource(&iomem_resource, res); | ||
| 362 | names += PCI_MMCFG_RESOURCE_NAME_LEN; | ||
| 363 | } | ||
| 364 | 368 | ||
| 365 | /* Mark that the resources have been inserted. */ | 369 | /* Mark that the resources have been inserted. */ |
| 366 | pci_mmcfg_resources_inserted = 1; | 370 | pci_mmcfg_resources_inserted = 1; |
| @@ -437,11 +441,12 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) | |||
| 437 | typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); | 441 | typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type); |
| 438 | 442 | ||
| 439 | static int __init is_mmconf_reserved(check_reserved_t is_reserved, | 443 | static int __init is_mmconf_reserved(check_reserved_t is_reserved, |
| 440 | u64 addr, u64 size, int i, | 444 | struct pci_mmcfg_region *cfg, int with_e820) |
| 441 | typeof(pci_mmcfg_config[0]) *cfg, int with_e820) | ||
| 442 | { | 445 | { |
| 446 | u64 addr = cfg->res.start; | ||
| 447 | u64 size = resource_size(&cfg->res); | ||
| 443 | u64 old_size = size; | 448 | u64 old_size = size; |
| 444 | int valid = 0; | 449 | int valid = 0, num_buses; |
| 445 | 450 | ||
| 446 | while (!is_reserved(addr, addr + size, E820_RESERVED)) { | 451 | while (!is_reserved(addr, addr + size, E820_RESERVED)) { |
| 447 | size >>= 1; | 452 | size >>= 1; |
| @@ -450,19 +455,25 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved, | |||
| 450 | } | 455 | } |
| 451 | 456 | ||
| 452 | if (size >= (16UL<<20) || size == old_size) { | 457 | if (size >= (16UL<<20) || size == old_size) { |
| 453 | printk(KERN_NOTICE | 458 | printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n", |
| 454 | "PCI: MCFG area at %Lx reserved in %s\n", | 459 | &cfg->res, |
| 455 | addr, with_e820?"E820":"ACPI motherboard resources"); | 460 | with_e820 ? "E820" : "ACPI motherboard resources"); |
| 456 | valid = 1; | 461 | valid = 1; |
| 457 | 462 | ||
| 458 | if (old_size != size) { | 463 | if (old_size != size) { |
| 459 | /* update end_bus_number */ | 464 | /* update end_bus */ |
| 460 | cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1); | 465 | cfg->end_bus = cfg->start_bus + ((size>>20) - 1); |
| 461 | printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx " | 466 | num_buses = cfg->end_bus - cfg->start_bus + 1; |
| 462 | "segment %hu buses %u - %u\n", | 467 | cfg->res.end = cfg->res.start + |
| 463 | i, (unsigned long)cfg->address, cfg->pci_segment, | 468 | PCI_MMCFG_BUS_OFFSET(num_buses) - 1; |
| 464 | (unsigned int)cfg->start_bus_number, | 469 | snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN, |
| 465 | (unsigned int)cfg->end_bus_number); | 470 | "PCI MMCONFIG %04x [bus %02x-%02x]", |
| 471 | cfg->segment, cfg->start_bus, cfg->end_bus); | ||
| 472 | printk(KERN_INFO PREFIX | ||
| 473 | "MMCONFIG for %04x [bus%02x-%02x] " | ||
| 474 | "at %pR (base %#lx) (size reduced!)\n", | ||
| 475 | cfg->segment, cfg->start_bus, cfg->end_bus, | ||
| 476 | &cfg->res, (unsigned long) cfg->address); | ||
| 466 | } | 477 | } |
| 467 | } | 478 | } |
| 468 | 479 | ||
| @@ -471,45 +482,26 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved, | |||
| 471 | 482 | ||
| 472 | static void __init pci_mmcfg_reject_broken(int early) | 483 | static void __init pci_mmcfg_reject_broken(int early) |
| 473 | { | 484 | { |
| 474 | typeof(pci_mmcfg_config[0]) *cfg; | 485 | struct pci_mmcfg_region *cfg; |
| 475 | int i; | ||
| 476 | 486 | ||
| 477 | if ((pci_mmcfg_config_num == 0) || | 487 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { |
| 478 | (pci_mmcfg_config == NULL) || | ||
| 479 | (pci_mmcfg_config[0].address == 0)) | ||
| 480 | return; | ||
| 481 | |||
| 482 | for (i = 0; i < pci_mmcfg_config_num; i++) { | ||
| 483 | int valid = 0; | 488 | int valid = 0; |
| 484 | u64 addr, size; | ||
| 485 | |||
| 486 | cfg = &pci_mmcfg_config[i]; | ||
| 487 | addr = cfg->start_bus_number; | ||
| 488 | addr <<= 20; | ||
| 489 | addr += cfg->address; | ||
| 490 | size = cfg->end_bus_number + 1 - cfg->start_bus_number; | ||
| 491 | size <<= 20; | ||
| 492 | printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " | ||
| 493 | "segment %hu buses %u - %u\n", | ||
| 494 | i, (unsigned long)cfg->address, cfg->pci_segment, | ||
| 495 | (unsigned int)cfg->start_bus_number, | ||
| 496 | (unsigned int)cfg->end_bus_number); | ||
| 497 | 489 | ||
| 498 | if (!early && !acpi_disabled) | 490 | if (!early && !acpi_disabled) |
| 499 | valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0); | 491 | valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0); |
| 500 | 492 | ||
| 501 | if (valid) | 493 | if (valid) |
| 502 | continue; | 494 | continue; |
| 503 | 495 | ||
| 504 | if (!early) | 496 | if (!early) |
| 505 | printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" | 497 | printk(KERN_ERR FW_BUG PREFIX |
| 506 | " reserved in ACPI motherboard resources\n", | 498 | "MMCONFIG at %pR not reserved in " |
| 507 | cfg->address); | 499 | "ACPI motherboard resources\n", &cfg->res); |
| 508 | 500 | ||
| 509 | /* Don't try to do this check unless configuration | 501 | /* Don't try to do this check unless configuration |
| 510 | type 1 is available. how about type 2 ?*/ | 502 | type 1 is available. how about type 2 ?*/ |
| 511 | if (raw_pci_ops) | 503 | if (raw_pci_ops) |
| 512 | valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1); | 504 | valid = is_mmconf_reserved(e820_all_mapped, cfg, 1); |
| 513 | 505 | ||
| 514 | if (!valid) | 506 | if (!valid) |
| 515 | goto reject; | 507 | goto reject; |
| @@ -518,34 +510,41 @@ static void __init pci_mmcfg_reject_broken(int early) | |||
| 518 | return; | 510 | return; |
| 519 | 511 | ||
| 520 | reject: | 512 | reject: |
| 521 | printk(KERN_INFO "PCI: Not using MMCONFIG.\n"); | 513 | printk(KERN_INFO PREFIX "not using MMCONFIG\n"); |
| 522 | pci_mmcfg_arch_free(); | 514 | free_all_mmcfg(); |
| 523 | kfree(pci_mmcfg_config); | ||
| 524 | pci_mmcfg_config = NULL; | ||
| 525 | pci_mmcfg_config_num = 0; | ||
| 526 | } | 515 | } |
| 527 | 516 | ||
| 528 | static int __initdata known_bridge; | 517 | static int __initdata known_bridge; |
| 529 | 518 | ||
| 530 | static int acpi_mcfg_64bit_base_addr __initdata = FALSE; | 519 | static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, |
| 520 | struct acpi_mcfg_allocation *cfg) | ||
| 521 | { | ||
| 522 | int year; | ||
| 531 | 523 | ||
| 532 | /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ | 524 | if (cfg->address < 0xFFFFFFFF) |
| 533 | struct acpi_mcfg_allocation *pci_mmcfg_config; | 525 | return 0; |
| 534 | int pci_mmcfg_config_num; | ||
| 535 | 526 | ||
| 536 | static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) | ||
| 537 | { | ||
| 538 | if (!strcmp(mcfg->header.oem_id, "SGI")) | 527 | if (!strcmp(mcfg->header.oem_id, "SGI")) |
| 539 | acpi_mcfg_64bit_base_addr = TRUE; | 528 | return 0; |
| 540 | 529 | ||
| 541 | return 0; | 530 | if (mcfg->header.revision >= 1) { |
| 531 | if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && | ||
| 532 | year >= 2010) | ||
| 533 | return 0; | ||
| 534 | } | ||
| 535 | |||
| 536 | printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx " | ||
| 537 | "is above 4GB, ignored\n", cfg->pci_segment, | ||
| 538 | cfg->start_bus_number, cfg->end_bus_number, cfg->address); | ||
| 539 | return -EINVAL; | ||
| 542 | } | 540 | } |
| 543 | 541 | ||
| 544 | static int __init pci_parse_mcfg(struct acpi_table_header *header) | 542 | static int __init pci_parse_mcfg(struct acpi_table_header *header) |
| 545 | { | 543 | { |
| 546 | struct acpi_table_mcfg *mcfg; | 544 | struct acpi_table_mcfg *mcfg; |
| 545 | struct acpi_mcfg_allocation *cfg_table, *cfg; | ||
| 547 | unsigned long i; | 546 | unsigned long i; |
| 548 | int config_size; | 547 | int entries; |
| 549 | 548 | ||
| 550 | if (!header) | 549 | if (!header) |
| 551 | return -EINVAL; | 550 | return -EINVAL; |
| @@ -553,38 +552,33 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header) | |||
| 553 | mcfg = (struct acpi_table_mcfg *)header; | 552 | mcfg = (struct acpi_table_mcfg *)header; |
| 554 | 553 | ||
| 555 | /* how many config structures do we have */ | 554 | /* how many config structures do we have */ |
| 556 | pci_mmcfg_config_num = 0; | 555 | free_all_mmcfg(); |
| 556 | entries = 0; | ||
| 557 | i = header->length - sizeof(struct acpi_table_mcfg); | 557 | i = header->length - sizeof(struct acpi_table_mcfg); |
| 558 | while (i >= sizeof(struct acpi_mcfg_allocation)) { | 558 | while (i >= sizeof(struct acpi_mcfg_allocation)) { |
| 559 | ++pci_mmcfg_config_num; | 559 | entries++; |
| 560 | i -= sizeof(struct acpi_mcfg_allocation); | 560 | i -= sizeof(struct acpi_mcfg_allocation); |
| 561 | }; | 561 | }; |
| 562 | if (pci_mmcfg_config_num == 0) { | 562 | if (entries == 0) { |
| 563 | printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); | 563 | printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); |
| 564 | return -ENODEV; | 564 | return -ENODEV; |
| 565 | } | 565 | } |
| 566 | 566 | ||
| 567 | config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); | 567 | cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1]; |
| 568 | pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); | 568 | for (i = 0; i < entries; i++) { |
| 569 | if (!pci_mmcfg_config) { | 569 | cfg = &cfg_table[i]; |
| 570 | printk(KERN_WARNING PREFIX | 570 | if (acpi_mcfg_check_entry(mcfg, cfg)) { |
| 571 | "No memory for MCFG config tables\n"); | 571 | free_all_mmcfg(); |
| 572 | return -ENOMEM; | ||
| 573 | } | ||
| 574 | |||
| 575 | memcpy(pci_mmcfg_config, &mcfg[1], config_size); | ||
| 576 | |||
| 577 | acpi_mcfg_oem_check(mcfg); | ||
| 578 | |||
| 579 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | ||
| 580 | if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) && | ||
| 581 | !acpi_mcfg_64bit_base_addr) { | ||
| 582 | printk(KERN_ERR PREFIX | ||
| 583 | "MMCONFIG not in low 4GB of memory\n"); | ||
| 584 | kfree(pci_mmcfg_config); | ||
| 585 | pci_mmcfg_config_num = 0; | ||
| 586 | return -ENODEV; | 572 | return -ENODEV; |
| 587 | } | 573 | } |
| 574 | |||
| 575 | if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number, | ||
| 576 | cfg->end_bus_number, cfg->address) == NULL) { | ||
| 577 | printk(KERN_WARNING PREFIX | ||
| 578 | "no memory for MCFG entries\n"); | ||
| 579 | free_all_mmcfg(); | ||
| 580 | return -ENOMEM; | ||
| 581 | } | ||
| 588 | } | 582 | } |
| 589 | 583 | ||
| 590 | return 0; | 584 | return 0; |
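[Editor's note] The entry count in pci_parse_mcfg() is just the table length, less the fixed MCFG header, divided by the per-entry size. A trimmed illustration; the structure layouts here are simplified stand-ins, not the real ACPI definitions:

    #include <stdio.h>

    /* Simplified stand-ins; the real layouts live in the ACPI headers. */
    struct acpi_table_header { unsigned int length; };
    struct acpi_table_mcfg { struct acpi_table_header header; char rsvd[8]; };
    struct acpi_mcfg_allocation {
            unsigned long long address;
            unsigned short pci_segment;
            unsigned char start_bus_number, end_bus_number;
            unsigned int reserved;
    };

    int main(void)
    {
            /* An MCFG carrying two allocation entries after the header. */
            unsigned int length = sizeof(struct acpi_table_mcfg) +
                                  2 * sizeof(struct acpi_mcfg_allocation);
            int entries = 0;
            long i = (long)(length - sizeof(struct acpi_table_mcfg));

            while (i >= (long)sizeof(struct acpi_mcfg_allocation)) {
                    entries++;
                    i -= sizeof(struct acpi_mcfg_allocation);
            }
            printf("%d entries\n", entries); /* prints 2 */
            return 0;
    }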
| @@ -614,9 +608,7 @@ static void __init __pci_mmcfg_init(int early) | |||
| 614 | 608 | ||
| 615 | pci_mmcfg_reject_broken(early); | 609 | pci_mmcfg_reject_broken(early); |
| 616 | 610 | ||
| 617 | if ((pci_mmcfg_config_num == 0) || | 611 | if (list_empty(&pci_mmcfg_list)) |
| 618 | (pci_mmcfg_config == NULL) || | ||
| 619 | (pci_mmcfg_config[0].address == 0)) | ||
| 620 | return; | 612 | return; |
| 621 | 613 | ||
| 622 | if (pci_mmcfg_arch_init()) | 614 | if (pci_mmcfg_arch_init()) |
| @@ -648,9 +640,7 @@ static int __init pci_mmcfg_late_insert_resources(void) | |||
| 648 | */ | 640 | */ |
| 649 | if ((pci_mmcfg_resources_inserted == 1) || | 641 | if ((pci_mmcfg_resources_inserted == 1) || |
| 650 | (pci_probe & PCI_PROBE_MMCONF) == 0 || | 642 | (pci_probe & PCI_PROBE_MMCONF) == 0 || |
| 651 | (pci_mmcfg_config_num == 0) || | 643 | list_empty(&pci_mmcfg_list)) |
| 652 | (pci_mmcfg_config == NULL) || | ||
| 653 | (pci_mmcfg_config[0].address == 0)) | ||
| 654 | return 1; | 644 | return 1; |
| 655 | 645 | ||
| 656 | /* | 646 | /* |
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index f10a7e94a84c..90d5fd476ed4 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c | |||
| @@ -27,18 +27,10 @@ static int mmcfg_last_accessed_cpu; | |||
| 27 | */ | 27 | */ |
| 28 | static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) | 28 | static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) |
| 29 | { | 29 | { |
| 30 | struct acpi_mcfg_allocation *cfg; | 30 | struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); |
| 31 | int cfg_num; | ||
| 32 | |||
| 33 | for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) { | ||
| 34 | cfg = &pci_mmcfg_config[cfg_num]; | ||
| 35 | if (cfg->pci_segment == seg && | ||
| 36 | (cfg->start_bus_number <= bus) && | ||
| 37 | (cfg->end_bus_number >= bus)) | ||
| 38 | return cfg->address; | ||
| 39 | } | ||
| 40 | 31 | ||
| 41 | /* Fall back to type 0 */ | 32 | if (cfg) |
| 33 | return cfg->address; | ||
| 42 | return 0; | 34 | return 0; |
| 43 | } | 35 | } |
| 44 | 36 | ||
| @@ -47,7 +39,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) | |||
| 47 | */ | 39 | */ |
| 48 | static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn) | 40 | static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn) |
| 49 | { | 41 | { |
| 50 | u32 dev_base = base | (bus << 20) | (devfn << 12); | 42 | u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12); |
| 51 | int cpu = smp_processor_id(); | 43 | int cpu = smp_processor_id(); |
| 52 | if (dev_base != mmcfg_last_accessed_device || | 44 | if (dev_base != mmcfg_last_accessed_device || |
| 53 | cpu != mmcfg_last_accessed_cpu) { | 45 | cpu != mmcfg_last_accessed_cpu) { |
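[Editor's note] For orientation, the dev_base composed above is a straight bit-field decode of extended config space: the bus selects a 1 MiB slice, the devfn a 4 KiB page inside it, and the low 12 bits address registers within the function. A worked example, with PCI_MMCFG_BUS_OFFSET assumed to be bus << 20 as elsewhere in this series:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MMCFG_BUS_OFFSET(bus) ((uint32_t)(bus) << 20) /* assumed */
    #define PCI_DEVFN(dev, fn) (((dev) << 3) | (fn))

    int main(void)
    {
            uint32_t base = 0xe0000000;
            int bus = 0x02;
            unsigned devfn = PCI_DEVFN(0x1f, 3); /* device 1f, function 3 */

            /* Same composition as pci_exp_set_dev_base() above. */
            uint32_t dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) |
                                (devfn << 12);

            printf("%#x\n", (unsigned)dev_base); /* prints 0xe02fb000 */
            return 0;
    }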
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index 94349f8b2f96..e783841bd1d7 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c | |||
| @@ -12,38 +12,15 @@ | |||
| 12 | #include <asm/e820.h> | 12 | #include <asm/e820.h> |
| 13 | #include <asm/pci_x86.h> | 13 | #include <asm/pci_x86.h> |
| 14 | 14 | ||
| 15 | /* Static virtual mapping of the MMCONFIG aperture */ | 15 | #define PREFIX "PCI: " |
| 16 | struct mmcfg_virt { | ||
| 17 | struct acpi_mcfg_allocation *cfg; | ||
| 18 | char __iomem *virt; | ||
| 19 | }; | ||
| 20 | static struct mmcfg_virt *pci_mmcfg_virt; | ||
| 21 | |||
| 22 | static char __iomem *get_virt(unsigned int seg, unsigned bus) | ||
| 23 | { | ||
| 24 | struct acpi_mcfg_allocation *cfg; | ||
| 25 | int cfg_num; | ||
| 26 | |||
| 27 | for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) { | ||
| 28 | cfg = pci_mmcfg_virt[cfg_num].cfg; | ||
| 29 | if (cfg->pci_segment == seg && | ||
| 30 | (cfg->start_bus_number <= bus) && | ||
| 31 | (cfg->end_bus_number >= bus)) | ||
| 32 | return pci_mmcfg_virt[cfg_num].virt; | ||
| 33 | } | ||
| 34 | |||
| 35 | /* Fall back to type 0 */ | ||
| 36 | return NULL; | ||
| 37 | } | ||
| 38 | 16 | ||
| 39 | static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) | 17 | static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) |
| 40 | { | 18 | { |
| 41 | char __iomem *addr; | 19 | struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); |
| 42 | 20 | ||
| 43 | addr = get_virt(seg, bus); | 21 | if (cfg && cfg->virt) |
| 44 | if (!addr) | 22 | return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12)); |
| 45 | return NULL; | 23 | return NULL; |
| 46 | return addr + ((bus << 20) | (devfn << 12)); | ||
| 47 | } | 24 | } |
| 48 | 25 | ||
| 49 | static int pci_mmcfg_read(unsigned int seg, unsigned int bus, | 26 | static int pci_mmcfg_read(unsigned int seg, unsigned int bus, |
| @@ -109,42 +86,30 @@ static struct pci_raw_ops pci_mmcfg = { | |||
| 109 | .write = pci_mmcfg_write, | 86 | .write = pci_mmcfg_write, |
| 110 | }; | 87 | }; |
| 111 | 88 | ||
| 112 | static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) | 89 | static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg) |
| 113 | { | 90 | { |
| 114 | void __iomem *addr; | 91 | void __iomem *addr; |
| 115 | u64 start, size; | 92 | u64 start, size; |
| 93 | int num_buses; | ||
| 116 | 94 | ||
| 117 | start = cfg->start_bus_number; | 95 | start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus); |
| 118 | start <<= 20; | 96 | num_buses = cfg->end_bus - cfg->start_bus + 1; |
| 119 | start += cfg->address; | 97 | size = PCI_MMCFG_BUS_OFFSET(num_buses); |
| 120 | size = cfg->end_bus_number + 1 - cfg->start_bus_number; | ||
| 121 | size <<= 20; | ||
| 122 | addr = ioremap_nocache(start, size); | 98 | addr = ioremap_nocache(start, size); |
| 123 | if (addr) { | 99 | if (addr) |
| 124 | printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", | 100 | addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus); |
| 125 | start, start + size - 1); | ||
| 126 | addr -= cfg->start_bus_number << 20; | ||
| 127 | } | ||
| 128 | return addr; | 101 | return addr; |
| 129 | } | 102 | } |
| 130 | 103 | ||
| 131 | int __init pci_mmcfg_arch_init(void) | 104 | int __init pci_mmcfg_arch_init(void) |
| 132 | { | 105 | { |
| 133 | int i; | 106 | struct pci_mmcfg_region *cfg; |
| 134 | pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) * | ||
| 135 | pci_mmcfg_config_num, GFP_KERNEL); | ||
| 136 | if (pci_mmcfg_virt == NULL) { | ||
| 137 | printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n"); | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | 107 | ||
| 141 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | 108 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { |
| 142 | pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; | 109 | cfg->virt = mcfg_ioremap(cfg); |
| 143 | pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]); | 110 | if (!cfg->virt) { |
| 144 | if (!pci_mmcfg_virt[i].virt) { | 111 | printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n", |
| 145 | printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " | 112 | &cfg->res); |
| 146 | "segment %d\n", | ||
| 147 | pci_mmcfg_config[i].pci_segment); | ||
| 148 | pci_mmcfg_arch_free(); | 113 | pci_mmcfg_arch_free(); |
| 149 | return 0; | 114 | return 0; |
| 150 | } | 115 | } |
| @@ -155,19 +120,12 @@ int __init pci_mmcfg_arch_init(void) | |||
| 155 | 120 | ||
| 156 | void __init pci_mmcfg_arch_free(void) | 121 | void __init pci_mmcfg_arch_free(void) |
| 157 | { | 122 | { |
| 158 | int i; | 123 | struct pci_mmcfg_region *cfg; |
| 159 | |||
| 160 | if (pci_mmcfg_virt == NULL) | ||
| 161 | return; | ||
| 162 | 124 | ||
| 163 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | 125 | list_for_each_entry(cfg, &pci_mmcfg_list, list) { |
| 164 | if (pci_mmcfg_virt[i].virt) { | 126 | if (cfg->virt) { |
| 165 | iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20)); | 127 | iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus)); |
| 166 | pci_mmcfg_virt[i].virt = NULL; | 128 | cfg->virt = NULL; |
| 167 | pci_mmcfg_virt[i].cfg = NULL; | ||
| 168 | } | 129 | } |
| 169 | } | 130 | } |
| 170 | |||
| 171 | kfree(pci_mmcfg_virt); | ||
| 172 | pci_mmcfg_virt = NULL; | ||
| 173 | } | 131 | } |
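[Editor's note] One subtlety in mcfg_ioremap() above: the returned pointer is biased downward by PCI_MMCFG_BUS_OFFSET(start_bus), so pci_dev_base() can index with the absolute bus number rather than (bus - start_bus); pci_mmcfg_arch_free() adds the same offset back before iounmap(). A pointer-free sketch of that bias, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MMCFG_BUS_OFFSET(bus) ((uint64_t)(bus) << 20) /* assumed */

    int main(void)
    {
            uint64_t mapping = 0x100000000ULL; /* pretend ioremap() result */
            int start_bus = 0x40, bus = 0x42;

            /* cfg->virt stores the biased pointer ... */
            uint64_t virt = mapping - PCI_MMCFG_BUS_OFFSET(start_bus);
            /* ... so absolute bus numbers index the mapping directly. */
            uint64_t dev = virt + PCI_MMCFG_BUS_OFFSET(bus);

            /* Prints 200000: (bus - start_bus) * 1 MiB into the window */
            printf("%llx\n", (unsigned long long)(dev - mapping));
            return 0;
    }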
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index d8214dc03fa7..bee8d6ac2691 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c | |||
| @@ -113,7 +113,7 @@ int main(int argc, char **argv) | |||
| 113 | char line[BUFSIZE], sym[BUFSIZE] = "<unknown>"; | 113 | char line[BUFSIZE], sym[BUFSIZE] = "<unknown>"; |
| 114 | unsigned char insn_buf[16]; | 114 | unsigned char insn_buf[16]; |
| 115 | struct insn insn; | 115 | struct insn insn; |
| 116 | int insns = 0, c; | 116 | int insns = 0; |
| 117 | int warnings = 0; | 117 | int warnings = 0; |
| 118 | 118 | ||
| 119 | parse_args(argc, argv); | 119 | parse_args(argc, argv); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c462cea8ef09..2b26dd5930c6 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -27,7 +27,9 @@ | |||
| 27 | #include <linux/page-flags.h> | 27 | #include <linux/page-flags.h> |
| 28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
| 29 | #include <linux/console.h> | 29 | #include <linux/console.h> |
| 30 | #include <linux/pci.h> | ||
| 30 | 31 | ||
| 32 | #include <xen/xen.h> | ||
| 31 | #include <xen/interface/xen.h> | 33 | #include <xen/interface/xen.h> |
| 32 | #include <xen/interface/version.h> | 34 | #include <xen/interface/version.h> |
| 33 | #include <xen/interface/physdev.h> | 35 | #include <xen/interface/physdev.h> |
| @@ -138,24 +140,23 @@ static void xen_vcpu_setup(int cpu) | |||
| 138 | */ | 140 | */ |
| 139 | void xen_vcpu_restore(void) | 141 | void xen_vcpu_restore(void) |
| 140 | { | 142 | { |
| 141 | if (have_vcpu_info_placement) { | 143 | int cpu; |
| 142 | int cpu; | ||
| 143 | 144 | ||
| 144 | for_each_online_cpu(cpu) { | 145 | for_each_online_cpu(cpu) { |
| 145 | bool other_cpu = (cpu != smp_processor_id()); | 146 | bool other_cpu = (cpu != smp_processor_id()); |
| 146 | 147 | ||
| 147 | if (other_cpu && | 148 | if (other_cpu && |
| 148 | HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) | 149 | HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) |
| 149 | BUG(); | 150 | BUG(); |
| 150 | 151 | ||
| 151 | xen_vcpu_setup(cpu); | 152 | xen_setup_runstate_info(cpu); |
| 152 | 153 | ||
| 153 | if (other_cpu && | 154 | if (have_vcpu_info_placement) |
| 154 | HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) | 155 | xen_vcpu_setup(cpu); |
| 155 | BUG(); | ||
| 156 | } | ||
| 157 | 156 | ||
| 158 | BUG_ON(!have_vcpu_info_placement); | 157 | if (other_cpu && |
| 158 | HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) | ||
| 159 | BUG(); | ||
| 159 | } | 160 | } |
| 160 | } | 161 | } |
| 161 | 162 | ||
| @@ -1176,10 +1177,16 @@ asmlinkage void __init xen_start_kernel(void) | |||
| 1176 | add_preferred_console("xenboot", 0, NULL); | 1177 | add_preferred_console("xenboot", 0, NULL); |
| 1177 | add_preferred_console("tty", 0, NULL); | 1178 | add_preferred_console("tty", 0, NULL); |
| 1178 | add_preferred_console("hvc", 0, NULL); | 1179 | add_preferred_console("hvc", 0, NULL); |
| 1180 | } else { | ||
| 1181 | /* Make sure ACS will be enabled */ | ||
| 1182 | pci_request_acs(); | ||
| 1179 | } | 1183 | } |
| 1184 | |||
| 1180 | 1185 | ||
| 1181 | xen_raw_console_write("about to get started...\n"); | 1186 | xen_raw_console_write("about to get started...\n"); |
| 1182 | 1187 | ||
| 1188 | xen_setup_runstate_info(0); | ||
| 1189 | |||
| 1183 | /* Start the world */ | 1190 | /* Start the world */ |
| 1184 | #ifdef CONFIG_X86_32 | 1191 | #ifdef CONFIG_X86_32 |
| 1185 | i386_start_kernel(); | 1192 | i386_start_kernel(); |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3bf7b1d250ce..bf4cd6bfe959 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -185,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn) | |||
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | /* Build the parallel p2m_top_mfn structures */ | 187 | /* Build the parallel p2m_top_mfn structures */ |
| 188 | static void __init xen_build_mfn_list_list(void) | 188 | void xen_build_mfn_list_list(void) |
| 189 | { | 189 | { |
| 190 | unsigned pfn, idx; | 190 | unsigned pfn, idx; |
| 191 | 191 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 738da0cb0d8b..563d20504988 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
| @@ -35,10 +35,10 @@ | |||
| 35 | 35 | ||
| 36 | cpumask_var_t xen_cpu_initialized_map; | 36 | cpumask_var_t xen_cpu_initialized_map; |
| 37 | 37 | ||
| 38 | static DEFINE_PER_CPU(int, resched_irq); | 38 | static DEFINE_PER_CPU(int, xen_resched_irq); |
| 39 | static DEFINE_PER_CPU(int, callfunc_irq); | 39 | static DEFINE_PER_CPU(int, xen_callfunc_irq); |
| 40 | static DEFINE_PER_CPU(int, callfuncsingle_irq); | 40 | static DEFINE_PER_CPU(int, xen_callfuncsingle_irq); |
| 41 | static DEFINE_PER_CPU(int, debug_irq) = -1; | 41 | static DEFINE_PER_CPU(int, xen_debug_irq) = -1; |
| 42 | 42 | ||
| 43 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | 43 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); |
| 44 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | 44 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); |
| @@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 103 | NULL); | 103 | NULL); |
| 104 | if (rc < 0) | 104 | if (rc < 0) |
| 105 | goto fail; | 105 | goto fail; |
| 106 | per_cpu(resched_irq, cpu) = rc; | 106 | per_cpu(xen_resched_irq, cpu) = rc; |
| 107 | 107 | ||
| 108 | callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); | 108 | callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); |
| 109 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, | 109 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, |
| @@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 114 | NULL); | 114 | NULL); |
| 115 | if (rc < 0) | 115 | if (rc < 0) |
| 116 | goto fail; | 116 | goto fail; |
| 117 | per_cpu(callfunc_irq, cpu) = rc; | 117 | per_cpu(xen_callfunc_irq, cpu) = rc; |
| 118 | 118 | ||
| 119 | debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); | 119 | debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); |
| 120 | rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, | 120 | rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, |
| @@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 122 | debug_name, NULL); | 122 | debug_name, NULL); |
| 123 | if (rc < 0) | 123 | if (rc < 0) |
| 124 | goto fail; | 124 | goto fail; |
| 125 | per_cpu(debug_irq, cpu) = rc; | 125 | per_cpu(xen_debug_irq, cpu) = rc; |
| 126 | 126 | ||
| 127 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); | 127 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); |
| 128 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, | 128 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, |
| @@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
| 133 | NULL); | 133 | NULL); |
| 134 | if (rc < 0) | 134 | if (rc < 0) |
| 135 | goto fail; | 135 | goto fail; |
| 136 | per_cpu(callfuncsingle_irq, cpu) = rc; | 136 | per_cpu(xen_callfuncsingle_irq, cpu) = rc; |
| 137 | 137 | ||
| 138 | return 0; | 138 | return 0; |
| 139 | 139 | ||
| 140 | fail: | 140 | fail: |
| 141 | if (per_cpu(resched_irq, cpu) >= 0) | 141 | if (per_cpu(xen_resched_irq, cpu) >= 0) |
| 142 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); | 142 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); |
| 143 | if (per_cpu(callfunc_irq, cpu) >= 0) | 143 | if (per_cpu(xen_callfunc_irq, cpu) >= 0) |
| 144 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 144 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
| 145 | if (per_cpu(debug_irq, cpu) >= 0) | 145 | if (per_cpu(xen_debug_irq, cpu) >= 0) |
| 146 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 146 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
| 147 | if (per_cpu(callfuncsingle_irq, cpu) >= 0) | 147 | if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) |
| 148 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | 148 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), |
| 149 | NULL); | ||
| 149 | 150 | ||
| 150 | return rc; | 151 | return rc; |
| 151 | } | 152 | } |
| @@ -295,6 +296,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) | |||
| 295 | (unsigned long)task_stack_page(idle) - | 296 | (unsigned long)task_stack_page(idle) - |
| 296 | KERNEL_STACK_OFFSET + THREAD_SIZE; | 297 | KERNEL_STACK_OFFSET + THREAD_SIZE; |
| 297 | #endif | 298 | #endif |
| 299 | xen_setup_runstate_info(cpu); | ||
| 298 | xen_setup_timer(cpu); | 300 | xen_setup_timer(cpu); |
| 299 | xen_init_lock_cpu(cpu); | 301 | xen_init_lock_cpu(cpu); |
| 300 | 302 | ||
| @@ -348,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu) | |||
| 348 | current->state = TASK_UNINTERRUPTIBLE; | 350 | current->state = TASK_UNINTERRUPTIBLE; |
| 349 | schedule_timeout(HZ/10); | 351 | schedule_timeout(HZ/10); |
| 350 | } | 352 | } |
| 351 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL); | 353 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); |
| 352 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 354 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
| 353 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 355 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
| 354 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | 356 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); |
| 355 | xen_uninit_lock_cpu(cpu); | 357 | xen_uninit_lock_cpu(cpu); |
| 356 | xen_teardown_timer(cpu); | 358 | xen_teardown_timer(cpu); |
| 357 | 359 | ||
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 95be7b434724..987267f79bf5 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
| 2 | #include <linux/clockchips.h> | ||
| 2 | 3 | ||
| 3 | #include <xen/interface/xen.h> | 4 | #include <xen/interface/xen.h> |
| 4 | #include <xen/grant_table.h> | 5 | #include <xen/grant_table.h> |
| @@ -27,6 +28,8 @@ void xen_pre_suspend(void) | |||
| 27 | 28 | ||
| 28 | void xen_post_suspend(int suspend_cancelled) | 29 | void xen_post_suspend(int suspend_cancelled) |
| 29 | { | 30 | { |
| 31 | xen_build_mfn_list_list(); | ||
| 32 | |||
| 30 | xen_setup_shared_info(); | 33 | xen_setup_shared_info(); |
| 31 | 34 | ||
| 32 | if (suspend_cancelled) { | 35 | if (suspend_cancelled) { |
| @@ -44,7 +47,19 @@ void xen_post_suspend(int suspend_cancelled) | |||
| 44 | 47 | ||
| 45 | } | 48 | } |
| 46 | 49 | ||
| 50 | static void xen_vcpu_notify_restore(void *data) | ||
| 51 | { | ||
| 52 | unsigned long reason = (unsigned long)data; | ||
| 53 | |||
| 54 | /* Boot processor notified via generic timekeeping_resume() */ | ||
| 55 | if ( smp_processor_id() == 0) | ||
| 56 | return; | ||
| 57 | |||
| 58 | clockevents_notify(reason, NULL); | ||
| 59 | } | ||
| 60 | |||
| 47 | void xen_arch_resume(void) | 61 | void xen_arch_resume(void) |
| 48 | { | 62 | { |
| 49 | /* nothing */ | 63 | smp_call_function(xen_vcpu_notify_restore, |
| 64 | (void *)CLOCK_EVT_NOTIFY_RESUME, 1); | ||
| 50 | } | 65 | } |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 0a5aa44299a5..0d3f07cd1b5f 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
| @@ -31,14 +31,14 @@ | |||
| 31 | #define NS_PER_TICK (1000000000LL / HZ) | 31 | #define NS_PER_TICK (1000000000LL / HZ) |
| 32 | 32 | ||
| 33 | /* runstate info updated by Xen */ | 33 | /* runstate info updated by Xen */ |
| 34 | static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); | 34 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); |
| 35 | 35 | ||
| 36 | /* snapshots of runstate info */ | 36 | /* snapshots of runstate info */ |
| 37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot); | 37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot); |
| 38 | 38 | ||
| 39 | /* unused ns of stolen and blocked time */ | 39 | /* unused ns of stolen and blocked time */ |
| 40 | static DEFINE_PER_CPU(u64, residual_stolen); | 40 | static DEFINE_PER_CPU(u64, xen_residual_stolen); |
| 41 | static DEFINE_PER_CPU(u64, residual_blocked); | 41 | static DEFINE_PER_CPU(u64, xen_residual_blocked); |
| 42 | 42 | ||
| 43 | /* return an consistent snapshot of 64-bit time/counter value */ | 43 | /* return an consistent snapshot of 64-bit time/counter value */ |
| 44 | static u64 get64(const u64 *p) | 44 | static u64 get64(const u64 *p) |
| @@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
| 79 | 79 | ||
| 80 | BUG_ON(preemptible()); | 80 | BUG_ON(preemptible()); |
| 81 | 81 | ||
| 82 | state = &__get_cpu_var(runstate); | 82 | state = &__get_cpu_var(xen_runstate); |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * The runstate info is always updated by the hypervisor on | 85 | * The runstate info is always updated by the hypervisor on |
| @@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
| 97 | /* return true when a vcpu could run but has no real cpu to run on */ | 97 | /* return true when a vcpu could run but has no real cpu to run on */ |
| 98 | bool xen_vcpu_stolen(int vcpu) | 98 | bool xen_vcpu_stolen(int vcpu) |
| 99 | { | 99 | { |
| 100 | return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; | 100 | return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static void setup_runstate_info(int cpu) | 103 | void xen_setup_runstate_info(int cpu) |
| 104 | { | 104 | { |
| 105 | struct vcpu_register_runstate_memory_area area; | 105 | struct vcpu_register_runstate_memory_area area; |
| 106 | 106 | ||
| 107 | area.addr.v = &per_cpu(runstate, cpu); | 107 | area.addr.v = &per_cpu(xen_runstate, cpu); |
| 108 | 108 | ||
| 109 | if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, | 109 | if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, |
| 110 | cpu, &area)) | 110 | cpu, &area)) |
| @@ -122,7 +122,7 @@ static void do_stolen_accounting(void) | |||
| 122 | 122 | ||
| 123 | WARN_ON(state.state != RUNSTATE_running); | 123 | WARN_ON(state.state != RUNSTATE_running); |
| 124 | 124 | ||
| 125 | snap = &__get_cpu_var(runstate_snapshot); | 125 | snap = &__get_cpu_var(xen_runstate_snapshot); |
| 126 | 126 | ||
| 127 | /* work out how much time the VCPU has not been runn*ing* */ | 127 | /* work out how much time the VCPU has not been runn*ing* */ |
| 128 | blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; | 128 | blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; |
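[Editor's note] The xen_residual_* fields renamed in the hunk below implement remainder-preserving tick accounting: iter_div_u64_rem() converts accumulated nanoseconds into whole ticks and stores the sub-tick remainder back per cpu, so no stolen or blocked time is lost between calls. A userspace sketch of that bookkeeping, assuming HZ=250 for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define NS_PER_TICK (1000000000ULL / 250) /* assumed HZ=250: 4 ms */

    int main(void)
    {
            uint64_t residual = 0;

            /* Two passes of 6 ms stolen time each: pass 0 accounts 1 tick
             * and keeps 2 ms; pass 1 folds that in and accounts 2 ticks. */
            for (int pass = 0; pass < 2; pass++) {
                    uint64_t stolen = 6000000 + residual;
                    uint64_t ticks = stolen / NS_PER_TICK;

                    residual = stolen % NS_PER_TICK;
                    printf("pass %d: %llu ticks, %llu ns left\n", pass,
                           (unsigned long long)ticks,
                           (unsigned long long)residual);
            }
            return 0;
    }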
| @@ -133,24 +133,24 @@ static void do_stolen_accounting(void) | |||
| 133 | 133 | ||
| 134 | /* Add the appropriate number of ticks of stolen time, | 134 | /* Add the appropriate number of ticks of stolen time, |
| 135 | including any left-overs from last time. */ | 135 | including any left-overs from last time. */ |
| 136 | stolen = runnable + offline + __get_cpu_var(residual_stolen); | 136 | stolen = runnable + offline + __get_cpu_var(xen_residual_stolen); |
| 137 | 137 | ||
| 138 | if (stolen < 0) | 138 | if (stolen < 0) |
| 139 | stolen = 0; | 139 | stolen = 0; |
| 140 | 140 | ||
| 141 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); | 141 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
| 142 | __get_cpu_var(residual_stolen) = stolen; | 142 | __get_cpu_var(xen_residual_stolen) = stolen; |
| 143 | account_steal_ticks(ticks); | 143 | account_steal_ticks(ticks); |
| 144 | 144 | ||
| 145 | /* Add the appropriate number of ticks of blocked time, | 145 | /* Add the appropriate number of ticks of blocked time, |
| 146 | including any left-overs from last time. */ | 146 | including any left-overs from last time. */ |
| 147 | blocked += __get_cpu_var(residual_blocked); | 147 | blocked += __get_cpu_var(xen_residual_blocked); |
| 148 | 148 | ||
| 149 | if (blocked < 0) | 149 | if (blocked < 0) |
| 150 | blocked = 0; | 150 | blocked = 0; |
| 151 | 151 | ||
| 152 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); | 152 | ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
| 153 | __get_cpu_var(residual_blocked) = blocked; | 153 | __get_cpu_var(xen_residual_blocked) = blocked; |
| 154 | account_idle_ticks(ticks); | 154 | account_idle_ticks(ticks); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| @@ -434,7 +434,7 @@ void xen_setup_timer(int cpu) | |||
| 434 | name = "<timer kasprintf failed>"; | 434 | name = "<timer kasprintf failed>"; |
| 435 | 435 | ||
| 436 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | 436 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
| 437 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | 437 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, |
| 438 | name, NULL); | 438 | name, NULL); |
| 439 | 439 | ||
| 440 | evt = &per_cpu(xen_clock_events, cpu); | 440 | evt = &per_cpu(xen_clock_events, cpu); |
| @@ -442,8 +442,6 @@ void xen_setup_timer(int cpu) | |||
| 442 | 442 | ||
| 443 | evt->cpumask = cpumask_of(cpu); | 443 | evt->cpumask = cpumask_of(cpu); |
| 444 | evt->irq = irq; | 444 | evt->irq = irq; |
| 445 | |||
| 446 | setup_runstate_info(cpu); | ||
| 447 | } | 445 | } |
| 448 | 446 | ||
| 449 | void xen_teardown_timer(int cpu) | 447 | void xen_teardown_timer(int cpu) |
| @@ -494,6 +492,7 @@ __init void xen_time_init(void) | |||
| 494 | 492 | ||
| 495 | setup_force_cpu_cap(X86_FEATURE_TSC); | 493 | setup_force_cpu_cap(X86_FEATURE_TSC); |
| 496 | 494 | ||
| 495 | xen_setup_runstate_info(cpu); | ||
| 497 | xen_setup_timer(cpu); | 496 | xen_setup_timer(cpu); |
| 498 | xen_setup_cpu_clockevents(); | 497 | xen_setup_cpu_clockevents(); |
| 499 | } | 498 | } |
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 02f496a8dbaa..53adefda4275 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S | |||
| @@ -96,7 +96,7 @@ ENTRY(xen_sysret32) | |||
| 96 | pushq $__USER32_CS | 96 | pushq $__USER32_CS |
| 97 | pushq %rcx | 97 | pushq %rcx |
| 98 | 98 | ||
| 99 | pushq $VGCF_in_syscall | 99 | pushq $0 |
| 100 | 1: jmp hypercall_iret | 100 | 1: jmp hypercall_iret |
| 101 | ENDPATCH(xen_sysret32) | 101 | ENDPATCH(xen_sysret32) |
| 102 | RELOC(xen_sysret32, 1b+1) | 102 | RELOC(xen_sysret32, 1b+1) |
| @@ -151,7 +151,7 @@ ENTRY(xen_syscall32_target) | |||
| 151 | ENTRY(xen_sysenter_target) | 151 | ENTRY(xen_sysenter_target) |
| 152 | lea 16(%rsp), %rsp /* strip %rcx, %r11 */ | 152 | lea 16(%rsp), %rsp /* strip %rcx, %r11 */ |
| 153 | mov $-ENOSYS, %rax | 153 | mov $-ENOSYS, %rax |
| 154 | pushq $VGCF_in_syscall | 154 | pushq $0 |
| 155 | jmp hypercall_iret | 155 | jmp hypercall_iret |
| 156 | ENDPROC(xen_syscall32_target) | 156 | ENDPROC(xen_syscall32_target) |
| 157 | ENDPROC(xen_sysenter_target) | 157 | ENDPROC(xen_sysenter_target) |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 355fa6b99c9c..f9153a300bce 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
| @@ -25,6 +25,7 @@ extern struct shared_info *HYPERVISOR_shared_info; | |||
| 25 | 25 | ||
| 26 | void xen_setup_mfn_list_list(void); | 26 | void xen_setup_mfn_list_list(void); |
| 27 | void xen_setup_shared_info(void); | 27 | void xen_setup_shared_info(void); |
| 28 | void xen_build_mfn_list_list(void); | ||
| 28 | void xen_setup_machphys_mapping(void); | 29 | void xen_setup_machphys_mapping(void); |
| 29 | pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); | 30 | pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); |
| 30 | void xen_ident_map_ISA(void); | 31 | void xen_ident_map_ISA(void); |
| @@ -41,6 +42,7 @@ void __init xen_build_dynamic_phys_to_machine(void); | |||
| 41 | 42 | ||
| 42 | void xen_init_irq_ops(void); | 43 | void xen_init_irq_ops(void); |
| 43 | void xen_setup_timer(int cpu); | 44 | void xen_setup_timer(int cpu); |
| 45 | void xen_setup_runstate_info(int cpu); | ||
| 44 | void xen_teardown_timer(int cpu); | 46 | void xen_teardown_timer(int cpu); |
| 45 | cycle_t xen_clocksource_read(void); | 47 | cycle_t xen_clocksource_read(void); |
| 46 | void xen_setup_cpu_clockevents(void); | 48 | void xen_setup_cpu_clockevents(void); |
