From a4c81cf684350797939416c99effb9d3ae46bca6 Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Sun, 18 May 2008 01:18:57 -0700
Subject: x86: extend e820 early_res support to 32-bit

Move the early_res-related code from e820_64.c to e820.c, make EBDA
detection happen in head32.c, and remove smp_alloc_memory(), because we
have a fixed trampoline address now.

Signed-off-by: Yinghai Lu

 arch/x86/kernel/e820.c              | 214 ++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/e820_64.c           | 196 --------------------------------
 arch/x86/kernel/head32.c            |  76 ++++++++++++
 arch/x86/kernel/setup_32.c          | 109 +++---------------
 arch/x86/kernel/smpboot.c           |  17 --
 arch/x86/kernel/trampoline.c        |   2
 arch/x86/mach-voyager/voyager_smp.c |   9 -
 include/asm-x86/e820.h              |   6 +
 include/asm-x86/e820_64.h           |   9 -
 include/asm-x86/smp.h               |   1
 10 files changed, 320 insertions(+), 319 deletions(-)

Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..514e52b95cef 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -201,7 +201,6 @@ extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
 #endif
 
-extern void smp_alloc_memory(void);
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
--
cgit v1.2.2


From 3b16cf874861436725c43ba0b68bdd799297be7c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 26 Jun 2008 11:21:54 +0200
Subject: x86: convert to generic helpers for IPI function calls

This converts x86, x86-64, and Xen to use the new generic helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
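For illustration, a minimal sketch of how a caller uses the cross-CPU
call interface that these generic helpers back.  smp_call_function_single()
is the real kernel entry point, but its argument list was also being
reworked around this same merge window, so treat the exact signature
(and the wait flag shown) as an assumption rather than this patch's API:

	#include <linux/smp.h>

	/* Runs on the target CPU in IPI context; it must not sleep. */
	static void who_am_i(void *info)
	{
	        int *answer = info;

	        *answer = smp_processor_id();
	}

	/*
	 * Call from process context with interrupts enabled; "target" is
	 * assumed to be a different, online CPU.
	 */
	static int ask_cpu(int target)
	{
	        int answer = -1;

	        /* wait == 1: return only after who_am_i() has run on "target". */
	        smp_call_function_single(target, who_am_i, &answer, 1);
	        return answer;
	}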
Acked-by: Ingo Molnar
Acked-by: Jeremy Fitzhardinge
Signed-off-by: Jens Axboe
---
 include/asm-x86/smp.h | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..e3c24807b59b 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -59,9 +59,9 @@ struct smp_ops {
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
+
+	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -103,17 +103,22 @@ static inline void smp_send_reschedule(int cpu)
 	smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -202,7 +207,5 @@ extern void cpu_uninit(void);
 #endif
 
 extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
--
cgit v1.2.2


From 23ca4bba3e20c6c3cb11c1bb0ab4770b724d39ac Mon Sep 17 00:00:00 2001
From: Mike Travis
Date: Mon, 12 May 2008 21:21:12 +0200
Subject: x86: cleanup early per cpu variables/accesses v4

  * Introduce a new PER_CPU macro called "EARLY_PER_CPU".  This is used
    by some per_cpu variables that are initialized and accessed before
    there are per_cpu areas allocated.

    ["Early" in respect to per_cpu variables means "earlier than the
    per_cpu areas have been set up".]

    This patchset adds these new macros:

	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)
	EXPORT_EARLY_PER_CPU_SYMBOL(_name)
	DECLARE_EARLY_PER_CPU(_type, _name)

	early_per_cpu_ptr(_name)
	early_per_cpu_map(_name, _idx)
	early_per_cpu(_name, _cpu)

    The DEFINE macro defines the per_cpu variable as well as the early
    map and pointer.  It also initializes the per_cpu variable and the
    map elements to "_initvalue".  The early_* macros provide access to
    the initial map (usually set up during system init) and the early
    pointer.  This pointer is initialized to point to the early map but
    is then NULL'ed when the actual per_cpu areas are set up.  After
    that, the per_cpu variable is the correct way to access the
    variable.

    The early_per_cpu() macro is not very efficient, but it does show
    how to access the variable from a function that can be called both
    "early" and "late".  It tests whether the early pointer is still
    non-NULL (and therefore still valid); otherwise, the per_cpu
    variable is used instead:

	#define early_per_cpu(_name, _cpu) 			\
		(early_per_cpu_ptr(_name) ?			\
			early_per_cpu_ptr(_name)[_cpu] :	\
			per_cpu(_name, _cpu))

    A better method is to actually check the pointer manually.
    In the case below, numa_set_node can be called both "early" and
    "late":

	void __cpuinit numa_set_node(int cpu, int node)
	{
	    int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	    if (cpu_to_node_map)
		    cpu_to_node_map[cpu] = node;
	    else
		    per_cpu(x86_cpu_to_node_map, cpu) = node;
	}

  * Add a flag "arch_provides_topology_pointers" that indicates that
    pointers to topology cpumask_t maps are available.  Otherwise, use
    the function returning the cpumask_t value.  This is useful when the
    cpumask_t set size is very large, to avoid copying data on to/off of
    the stack.

  * The coverage of CONFIG_DEBUG_PER_CPU_MAPS has been increased while
    the non-debug case has been optimized a bit.

  * Remove an unreferenced compiler warning in drivers/base/topology.c

  * Clean up #ifdef in setup.c

For inclusion into sched-devel/latest tree.

Based on:
	git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
    +   sched-devel/latest  .../mingo/linux-2.6-sched-devel.git

Signed-off-by: Mike Travis
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/asm-x86/smp.h | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..ec841639fb44 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -29,21 +29,12 @@ extern int smp_num_siblings;
 extern unsigned int num_processors;
 extern cpumask_t cpu_initialized;
 
-#ifdef CONFIG_SMP
-extern u16 x86_cpu_to_apicid_init[];
-extern u16 x86_bios_cpu_apicid_init[];
-extern void *x86_cpu_to_apicid_early_ptr;
-extern void *x86_bios_cpu_apicid_early_ptr;
-#else
-#define x86_cpu_to_apicid_early_ptr NULL
-#define x86_bios_cpu_apicid_early_ptr NULL
-#endif
-
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
+
+DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
 extern struct {
--
cgit v1.2.2


From 1481a3dd42c21ac4a8b9497cb9f5df816d6b064f Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Wed, 4 Jun 2008 15:35:03 -0300
Subject: x86: move cpu_exit_clear to process_32.c

Take it out of smpboot.c and move it to process_32.c, closer to its
only user.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index fc1007321ef6..fad45f6a193f 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -188,7 +188,6 @@ static inline int hard_smp_processor_id(void)
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
 #endif
 
--
cgit v1.2.2


From 329513a35d1a2b6b28d54f5c2c0dde4face8200b Mon Sep 17 00:00:00 2001
From: Yinghai Lu
Date: Wed, 2 Jul 2008 18:54:40 -0700
Subject: x86: move prefill_possible_map calling early

Call it right after we are done with MADT/mptable handling, instead of
doing that in setup_per_cpu_areas() later on.  This way
for_each_possible_cpu() can be used early.
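For illustration, a hypothetical sketch of the kind of early-boot code
this enables; boot_node_of_cpu and init_boot_node_map() are made-up
names, and only for_each_possible_cpu(), NR_CPUS, and the __init
annotations are the real kernel API.  Walking every possible CPU this
early is only meaningful because prefill_possible_map() has already
populated cpu_possible_map from the MADT/mptable data:

	#include <linux/cpumask.h>
	#include <linux/init.h>

	/* Hypothetical boot-time table with one slot per possible CPU. */
	static int __initdata boot_node_of_cpu[NR_CPUS];

	static void __init init_boot_node_map(void)
	{
	        int cpu;

	        /* Valid this early only because cpu_possible_map is filled in. */
	        for_each_possible_cpu(cpu)
	                boot_node_of_cpu[cpu] = 0;      /* assume node 0 until NUMA setup */
	}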
Signed-off-by: Yinghai Lu
Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index fad45f6a193f..b324a0645a78 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -119,6 +119,10 @@ static inline int num_booting_cpus(void)
 {
 	return cpus_weight(cpu_callout_map);
 }
+#else
+static inline void prefill_possible_map(void)
+{
+}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
--
cgit v1.2.2


From 4a7017370aa0a94a00ae5b5705e9169cdcae5fb8 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 3 Jul 2008 15:57:47 +0200
Subject: x86: move prefill_possible_map calling early, fix

fix:

  arch/x86/kernel/built-in.o: In function `setup_arch':
  : undefined reference to `prefill_possible_map'

Signed-off-by: Ingo Molnar
---
 include/asm-x86/smp.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/asm-x86/smp.h')

diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index b324a0645a78..2e221f1ce0b2 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -109,8 +109,6 @@ int native_cpu_up(unsigned int cpunum);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
@@ -119,11 +117,15 @@ static inline int num_booting_cpus(void)
 {
 	return cpus_weight(cpu_callout_map);
 }
+#endif /* CONFIG_SMP */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
+extern void prefill_possible_map(void);
 #else
 static inline void prefill_possible_map(void)
 {
 }
-#endif /* CONFIG_SMP */
+#endif
 
 extern unsigned disabled_cpus __cpuinitdata;
 
--
cgit v1.2.2
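The fix above is the usual kernel header idiom: declare the real
function when the relevant config options are enabled, and otherwise
provide an empty static inline stub, so that common callers such as
setup_arch() never need an #ifdef of their own.  A minimal sketch of the
same pattern with made-up names (CONFIG_FEATURE_FOO and foo_init() are
not real kernel symbols):

	/* In some header: */
	#ifdef CONFIG_FEATURE_FOO
	extern void foo_init(void);             /* real implementation lives in foo.c */
	#else
	static inline void foo_init(void) { }   /* no-op when the feature is off */
	#endif

	/* Callers stay #ifdef-free: */
	void setup_everything(void)
	{
	        foo_init();                     /* either the real call or nothing */
	}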