author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-28 01:18:55 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-28 01:18:55 -0400
commit		6e1b97d899401035dd9d02b0fd93ad54641f867a (patch)
tree		f98006592293d38b25a30ab08c3ae88601ac76c1
parent		36ea96a485ce09a88819896e48468d6469c292ab (diff)
parent		69243f91257083795065762ce805120b980e256b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: Dump filtering supports x86_64 sparsemem
  x86: fix compiler warnings in arch/x86/kernel/early-quirks.c
  x86: fix !SMP compiler warning in arch/x86/kernel/acpi/processor.c
  x86: Fix boot protocol KEEP_SEGMENTS check.
  x86: voyager: fix bogus conversion to per_cpu for boot_cpu_info
  x86: export smp_ops to allow modular build of KVM
  Revert "i386: export i386 smp_call_function_mask() to modules"
-rw-r--r--	arch/x86/boot/compressed/head_32.S	12
-rw-r--r--	arch/x86/boot/compressed/head_64.S	7
-rw-r--r--	arch/x86/kernel/acpi/processor.c	3
-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	10
-rw-r--r--	arch/x86/kernel/early-quirks.c	4
-rw-r--r--	arch/x86/kernel/machine_kexec_64.c	2
-rw-r--r--	arch/x86/kernel/smp_32.c	8
-rw-r--r--	arch/x86/mach-voyager/voyager_smp.c	4
-rw-r--r--	include/asm-x86/smp_32.h	9
9 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index a0ae2e7f6cec..036e635f18a3 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,24 +33,20 @@
 	.globl startup_32
 
 startup_32:
-	/* check to see if KEEP_SEGMENTS flag is meaningful */
-	cmpw $0x207, BP_version(%esi)
-	jb 1f
-
+	cld
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	 * us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
-	jnz 2f
+	jnz 1f
 
-1:	cli
+	cli
 	movl $(__BOOT_DS),%eax
 	movl %eax,%ds
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
 	movl %eax,%ss
-
-2:	cld
+1:
 
 /* Calculate the delta between where we were compiled to run
  * at and where we were actually loaded at. This can only be done
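
The head_32.S hunk above drops the boot-protocol version probe and keys only on bit 6 of hdr.loadflags (KEEP_SEGMENTS, introduced with boot protocol 2.07), with cld hoisted ahead of the test. As a minimal sketch of the loader side -- assuming a hypothetical boot loader and an illustrative subset of struct setup_header, not the real layout -- a loader that has already installed flat segments would request that the decompressor keep them like this:

#include <stdint.h>

#define KEEP_SEGMENTS	(1 << 6)	/* bit 6 of setup_header.loadflags */

struct setup_header_stub {		/* illustrative subset, not the real struct setup_header */
	uint16_t version;		/* boot protocol version, e.g. 0x0207 */
	uint8_t  loadflags;
};

static void request_keep_segments(struct setup_header_stub *hdr)
{
	/* The flag exists from boot protocol 2.07 on; older kernels ignore it,
	 * so the version check belongs in the loader, not in the decompressor. */
	if (hdr->version >= 0x0207)
		hdr->loadflags |= KEEP_SEGMENTS;
}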
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 49467640751f..1ccb38a7f0d2 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -29,6 +29,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
+#include <asm/asm-offsets.h>
 
 .section ".text.head"
 	.code32
@@ -36,11 +37,17 @@
 
 startup_32:
 	cld
+	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
+	 * us to not reload segments */
+	testb $(1<<6), BP_loadflags(%esi)
+	jnz 1f
+
 	cli
 	movl $(__KERNEL_DS), %eax
 	movl %eax, %ds
 	movl %eax, %es
 	movl %eax, %ss
+1:
 
 /* Calculate the delta between where we were compiled to run
  * at and where we were actually loaded at. This can only be done
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index 2ed0a4ce62f0..f63e5ff0aca1 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -62,8 +62,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 /* Initialize _PDC data based on the CPU vendor */
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 {
-	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	struct cpuinfo_x86 *c = &cpu_data(pr->id);
 
 	pr->pdc = NULL;
 	if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 7e50bda565b4..d1b6ed98774e 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -15,12 +15,16 @@
 #include <asm/segment.h>
 #include <asm/thread_info.h>
 #include <asm/ia32.h>
+#include <asm/bootparam.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
 #define BLANK() asm volatile("\n->" : : )
 
+#define OFFSET(sym, str, mem) \
+	DEFINE(sym, offsetof(struct str, mem))
+
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_64_UNISTD_H_
@@ -109,5 +113,11 @@ int main(void)
 	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
 	BLANK();
 	DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+
+	BLANK();
+	OFFSET(BP_scratch, boot_params, scratch);
+	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
+	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
+	OFFSET(BP_version, boot_params, hdr.version);
 	return 0;
 }
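
The OFFSET() entries added above are what make boot_params fields addressable from assembly: asm-offsets_64.c is compiled to assembly, and the "->" markers emitted by the DEFINE()/OFFSET() macros are scraped by the kernel build into asm-offsets.h as constants such as BP_loadflags, which head_32.S and head_64.S then use as BP_loadflags(%esi). A minimal, self-contained sketch of the same trick with a toy structure (toy_params and the TOY_* names are illustrative only):

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct toy_params {			/* stand-in for struct boot_params */
	char		scratch[16];
	unsigned char	loadflags;
};

int main(void)
{
	/* Each OFFSET() plants a "->SYM <offset>" marker in the compiler output. */
	OFFSET(TOY_scratch, toy_params, scratch);
	OFFSET(TOY_loadflags, toy_params, loadflags);
	return 0;
}

Compiling this with "gcc -S" and grepping the generated assembly for "->" shows the symbol/offset pairs that the real build turns into #defines for use from .S files.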
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index dc34acbd54aa..639e6320518e 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -35,12 +35,14 @@ static void __init via_bugs(void)
 }
 
 #ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_IO_APIC
 
 static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_X86_IO_APIC */
+#endif /* CONFIG_ACPI */
 
 static void __init nvidia_bugs(void)
 {
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 0d8577f05422..aa3d2c8f7737 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -233,6 +233,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 
 void arch_crash_save_vmcoreinfo(void)
 {
+	VMCOREINFO_SYMBOL(init_level4_pgt);
+
 #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
 	VMCOREINFO_SYMBOL(node_data);
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index f32115308399..fcaa026eb807 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -708,10 +708,4 @@ struct smp_ops smp_ops = {
 	.smp_send_reschedule = native_smp_send_reschedule,
 	.smp_call_function_mask = native_smp_call_function_mask,
 };
-
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-			   void *info, int wait)
-{
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function_mask);
+EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 361ac5107b33..69371434b0cf 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -29,14 +29,14 @@
 #include <asm/arch_hooks.h>
 
 /* TLB state -- visible externally, indexed physically */
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
 
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
-DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* physical ID of the CPU used to boot the system */
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 7056d8684522..e10b7affdfe5 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -94,9 +94,12 @@ static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
 }
-extern int smp_call_function_mask(cpumask_t mask,
-				  void (*func) (void *info), void *info,
-				  int wait);
+static inline int smp_call_function_mask(cpumask_t mask,
+					 void (*func) (void *info), void *info,
+					 int wait)
+{
+	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
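
Turning smp_call_function_mask() into a static inline pairs with the smp_32.c hunk above: a module now expands the wrapper at its own call site and references only the smp_ops structure, which smp_32.c exports with EXPORT_SYMBOL_GPL -- this is what allows KVM to be built as a module without re-exporting the function itself. A minimal sketch, assuming a hypothetical demo module and the 2.6.23-era i386 API (cpu_online_map, smp_call_function_mask with a wait argument):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

static void demo_ipi_handler(void *info)
{
	printk(KERN_INFO "demo: handler on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	/* Expands the new static inline from smp_32.h, i.e. ends up calling
	 * smp_ops.smp_call_function_mask() through the exported smp_ops. */
	return smp_call_function_mask(cpu_online_map, demo_ipi_handler, NULL, 1);
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");		/* needed: smp_ops is EXPORT_SYMBOL_GPL */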