author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 22:43:57 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 22:43:57 -0400
commit    bf67f3a5c456a18f2e8d062f7e88506ef2cd9837 (patch)
tree      2a2324b2572162059307db82f9238eeb25673a77 /arch/x86
parent    226da0dbc84ed97f448523e2a4cb91c27fa68ed9 (diff)
parent    203dacbdca977bedaba61ad2fca75d934060a5d5 (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug cleanups from Thomas Gleixner:
 "This series is merily a cleanup of code copied around in arch/* and
  not changing any of the real cpu hotplug horrors yet. I wish I'd had
  something more substantial for 3.5, but I underestimated the lurking
  horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  um: Remove leftover declaration of alloc_task_struct_node()
  task_allocator: Use config switches instead of magic defines
  sparc: Use common threadinfo allocator
  score: Use common threadinfo allocator
  sh-use-common-threadinfo-allocator
  mn10300: Use common threadinfo allocator
  powerpc: Use common threadinfo allocator
  mips: Use common threadinfo allocator
  hexagon: Use common threadinfo allocator
  m32r: Use common threadinfo allocator
  frv: Use common threadinfo allocator
  cris: Use common threadinfo allocator
  x86: Use common threadinfo allocator
  c6x: Use common threadinfo allocator
  fork: Provide kmemcache based thread_info allocator
  tile: Use common threadinfo allocator
  fork: Provide weak arch_release_[task_struct|thread_info] functions
  fork: Move thread info gfp flags to header
  fork: Remove the weak insanity
  sh: Remove cpu_idle_wait()
  ...
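
The series' central change is visible in the thread_info.h hunk further down:
the open-coded x86 allocator macro is deleted in favour of one shared
implementation in kernel/fork.c. As a rough sketch of what the common
allocator looks like (reconstructed here from the x86 macro being removed,
not quoted verbatim from the generic code):

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	/* THREADINFO_GFP replaces the per-arch THREAD_FLAGS defines */
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}

Every architecture that switches to the generic path drops its private copy
of this pair, which is exactly what the arch/x86/include/asm/thread_info.h
hunk below deletes.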
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                      |  4
-rw-r--r--  arch/x86/Makefile                     |  1
-rw-r--r--  arch/x86/include/asm/boot.h           |  2
-rw-r--r--  arch/x86/include/asm/page_32_types.h  |  4
-rw-r--r--  arch/x86/include/asm/page_64_types.h  |  4
-rw-r--r--  arch/x86/include/asm/processor.h      |  2
-rw-r--r--  arch/x86/include/asm/smp.h            | 11
-rw-r--r--  arch/x86/include/asm/thread_info.h    | 21
-rw-r--r--  arch/x86/kernel/Makefile              |  2
-rw-r--r--  arch/x86/kernel/apm_32.c              |  2
-rw-r--r--  arch/x86/kernel/init_task.c           | 42
-rw-r--r--  arch/x86/kernel/irq_32.c              |  8
-rw-r--r--  arch/x86/kernel/process.c             | 34
-rw-r--r--  arch/x86/kernel/smpboot.c             | 83
-rw-r--r--  arch/x86/xen/smp.c                    | 19
15 files changed, 46 insertions(+), 193 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 25f87bccbf8f..c940cb6f0409 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -82,6 +82,7 @@ config X86
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
 	select DCACHE_WORD_ACCESS
+	select GENERIC_SMP_IDLE_THREAD
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -160,9 +161,6 @@ config RWSEM_GENERIC_SPINLOCK
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool X86_XADD
 
-config ARCH_HAS_CPU_IDLE_WAIT
-	def_bool y
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 94e91e401da9..277418ff8b52 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -149,7 +149,6 @@ archheaders:
 head-y := arch/x86/kernel/head_$(BITS).o
 head-y += arch/x86/kernel/head$(BITS).o
 head-y += arch/x86/kernel/head.o
-head-y += arch/x86/kernel/init_task.o
 
 libs-y  += arch/x86/lib/
 
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 5e1a2eef3e7c..b13fe63bdc59 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -19,7 +19,7 @@
 #ifdef CONFIG_X86_64
 #define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
 #else
-#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_ORDER)
+#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_SIZE_ORDER)
 #endif
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ade619ff9e2a..ef17af013475 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,8 +15,8 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
-#define THREAD_ORDER	1
-#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE	(PAGE_SIZE << THREAD_SIZE_ORDER)
 
 #define STACKFAULT_STACK 0
 #define DOUBLEFAULT_STACK 1
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 7639dbf5d223..320f7bb95f76 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,8 +1,8 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_ORDER	1
-#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
 #define EXCEPTION_STACK_ORDER 0
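
The THREAD_ORDER to THREAD_SIZE_ORDER rename above is purely mechanical; the
value stays 1 on both 32-bit and 64-bit x86, so with the usual 4 KiB page
size the kernel stack size works out as:

	THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER
	            = 4096 << 1
	            = 8192 bytes (one order-1, 8 KiB stack allocation)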
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4fa7dcceb6c0..ccbb1ea99ccb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -974,8 +974,6 @@ extern bool cpu_has_amd_erratum(const int *);
 #define cpu_has_amd_erratum(x)	(false)
 #endif /* CONFIG_CPU_SUP_AMD */
 
-void cpu_idle_wait(void);
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0434c400287c..f8cbc6f20e31 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -62,6 +62,8 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
 /* Static state in head.S used to set up a CPU */
 extern unsigned long stack_start; /* Initial stack pointer address */
 
+struct task_struct;
+
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -70,7 +72,7 @@ struct smp_ops {
 	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
-	int (*cpu_up)(unsigned cpu);
+	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
@@ -113,9 +115,9 @@ static inline void smp_cpus_done(unsigned int max_cpus)
 	smp_ops.smp_cpus_done(max_cpus);
 }
 
-static inline int __cpu_up(unsigned int cpu)
+static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-	return smp_ops.cpu_up(cpu);
+	return smp_ops.cpu_up(cpu, tidle);
 }
 
 static inline int __cpu_disable(void)
@@ -152,7 +154,7 @@ void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
-int native_cpu_up(unsigned int cpunum);
+int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
 void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
@@ -162,6 +164,7 @@ int wbinvd_on_all_cpus(void);
 
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
+void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
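
With the extra struct task_struct * parameter, the architecture no longer
forks or caches the idle thread itself; the generic hotplug core hands it
one. A sketch of the caller side (a simplified rendering of what kernel/cpu.c
does after this series, with notifiers and error unwinding elided, so treat
the details as assumptions):

static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct task_struct *idle;
	int ret;

	/* idle_thread_get() hands back the cached, reinitialized idle task */
	idle = idle_thread_get(cpu);
	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/* ... CPU_UP_PREPARE notifiers ... */

	ret = __cpu_up(cpu, idle);	/* arch hook, with its new argument */

	/* ... CPU_ONLINE notifiers ... */
	return ret;
}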
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ad6df8ccd715..73cfe0d309c9 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -155,24 +155,6 @@ struct thread_info {
 
 #define PREEMPT_ACTIVE		0x10000000
 
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-#else
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
-#endif
-
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
-#define alloc_thread_info_node(tsk, node)				\
-({									\
-	struct page *page = alloc_pages_node(node, THREAD_FLAGS,	\
-					     THREAD_ORDER);		\
-	struct thread_info *ret = page ? page_address(page) : NULL;	\
-									\
-	ret;								\
-})
-
 #ifdef CONFIG_X86_32
 
 #define STACK_WARN	(THREAD_SIZE/8)
@@ -282,8 +264,7 @@ static inline bool is_ia32_task(void)
 
 #ifndef __ASSEMBLY__
 extern void arch_task_cache_init(void);
-extern void free_thread_info(struct thread_info *ti);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#define arch_task_cache_init arch_task_cache_init
+extern void arch_release_task_struct(struct task_struct *tsk);
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 532d2e090e6f..56ebd1f98447 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y	:= head_$(BITS).o head$(BITS).o head.o init_task.o vmlinux.lds
+extra-y	:= head_$(BITS).o head$(BITS).o head.o vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 459e78cbf61e..07b0c0db466c 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2401,7 +2401,7 @@ static void __exit apm_exit(void)
 	 * (pm_idle), Wait for all processors to update cached/local
 	 * copies of pm_idle before proceeding.
 	 */
-		cpu_idle_wait();
+		kick_all_cpus_sync();
 	}
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
deleted file mode 100644
index 43e9ccf44947..000000000000
--- a/arch/x86/kernel/init_task.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is THREAD_SIZE aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
-	{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data..cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
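
Of the three things this deleted file provided, the per-CPU TSS moves into
arch/x86/kernel/process.c (see the hunk below), while init_task and
init_thread_union are expected to come from a single generic instance shared
by all architectures. A hypothetical sketch of that generic file (it is
outside this diff, so the exact form is an assumption):

/* Assumed shape of the arch-independent init_task instance */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* Must stay THREAD_SIZE aligned; placed via a dedicated linker entry */
union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };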
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 58b7f27cb3e9..344faf8d0d62 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -127,8 +127,8 @@ void __cpuinit irq_ctx_init(int cpu)
 		return;
 
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREAD_FLAGS,
-					       THREAD_ORDER));
+					       THREADINFO_GFP,
+					       THREAD_SIZE_ORDER));
 	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu		= cpu;
 	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
@@ -137,8 +137,8 @@ void __cpuinit irq_ctx_init(int cpu)
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
 	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREAD_FLAGS,
-					       THREAD_ORDER));
+					       THREADINFO_GFP,
+					       THREAD_SIZE_ORDER));
 	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
 	irqctx->tinfo.cpu		= cpu;
 	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1d92a5ab6e8b..e8173154800d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -27,6 +27,15 @@
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -67,10 +76,9 @@ void free_thread_xstate(struct task_struct *tsk)
 	fpu_free(&tsk->thread.fpu);
 }
 
-void free_thread_info(struct thread_info *ti)
+void arch_release_task_struct(struct task_struct *tsk)
 {
-	free_thread_xstate(ti->task);
-	free_pages((unsigned long)ti, THREAD_ORDER);
+	free_thread_xstate(tsk);
 }
 
 void arch_task_cache_init(void)
@@ -516,26 +524,6 @@ void stop_this_cpu(void *dummy)
 	}
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-	smp_mb();
-	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
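
The removed cpu_idle_wait() does not disappear; it moves into generic code
under the name kick_all_cpus_sync(), which the apm_32.c hunk above now calls.
The generic helper is essentially the same body in kernel/smp.c; a sketch,
assuming the implementation from this series:

static void do_nothing(void *unused)
{
}

void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);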
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6e1e406038c2..3acaf51dfddb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -76,20 +76,8 @@
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
 #ifdef CONFIG_HOTPLUG_CPU
 /*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
-
-/*
  * We need this for trampoline_base protection from concurrent accesses when
  * off- and onlining cores wildly.
  */
@@ -97,20 +85,16 @@ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
 void cpu_hotplug_driver_lock(void)
 {
 	mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
 void cpu_hotplug_driver_unlock(void)
 {
 	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
 
 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
 #endif
 
 /* Number of siblings per CPU package */
@@ -618,22 +602,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }
 
-struct create_idle {
-	struct work_struct work;
-	struct task_struct *idle;
-	struct completion done;
-	int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
-	struct create_idle *c_idle =
-		container_of(work, struct create_idle, work);
-
-	c_idle->idle = fork_idle(c_idle->cpu);
-	complete(&c_idle->done);
-}
-
 /* reduce the number of lines printed when booting a large cpu count system */
 static void __cpuinit announce_cpu(int cpu, int apicid)
 {
@@ -660,58 +628,31 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
  * Returns zero if CPU booted OK, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	unsigned long boot_error = 0;
 	unsigned long start_ip;
 	int timeout;
-	struct create_idle c_idle = {
-		.cpu	= cpu,
-		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
-	};
-
-	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
-	c_idle.idle = get_idle_for_cpu(cpu);
-
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
-	if (c_idle.idle) {
-		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
-		init_idle(c_idle.idle, cpu);
-		goto do_rest;
-	}
+	idle->thread.sp = (unsigned long) (((struct pt_regs *)
+			  (THREAD_SIZE + task_stack_page(idle))) - 1);
+	per_cpu(current_task, cpu) = idle;
 
-	schedule_work(&c_idle.work);
-	wait_for_completion(&c_idle.done);
-
-	if (IS_ERR(c_idle.idle)) {
-		printk("failed fork for CPU %d\n", cpu);
-		destroy_work_on_stack(&c_idle.work);
-		return PTR_ERR(c_idle.idle);
-	}
-
-	set_idle_for_cpu(cpu, c_idle.idle);
-do_rest:
-	per_cpu(current_task, cpu) = c_idle.idle;
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
 #else
-	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
 	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(c_idle.idle) -
+		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start  = c_idle.idle->thread.sp;
+	stack_start  = idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = trampoline_address();
@@ -813,12 +754,10 @@ do_rest:
 	 */
 		smpboot_restore_warm_reset_vector();
 	}
-
-	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
 
-int __cpuinit native_cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	unsigned long flags;
@@ -851,7 +790,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-	err = do_boot_cpu(apicid, cpu);
+	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
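
The idle-thread caching that do_boot_cpu() loses here (idle_thread_array with
get_idle_for_cpu()/set_idle_for_cpu()) is what GENERIC_SMP_IDLE_THREAD,
selected in the Kconfig hunk at the top, now provides for every architecture.
A sketch of the shared cache (the assumed shape of the generic smpboot code
from this series, not quoted from it):

static DEFINE_PER_CPU(struct task_struct *, idle_threads);

/* Called by the hotplug core; reuses the idle task forked at boot */
struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

Reusing the task across offline/online cycles also sidesteps the
"complicated thread destroy functionality" that the deleted x86 comment
mentioned.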
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0503c0c493a9..3700945ed0d5 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -265,18 +265,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		set_cpu_possible(cpu, false);
 	}
 
-	for_each_possible_cpu (cpu) {
-		struct task_struct *idle;
-
-		if (cpu == 0)
-			continue;
-
-		idle = fork_idle(cpu);
-		if (IS_ERR(idle))
-			panic("failed fork for CPU %d", cpu);
-
+	for_each_possible_cpu(cpu)
 		set_cpu_present(cpu, true);
-	}
 }
 
 static int __cpuinit
@@ -346,9 +336,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int __cpuinit xen_cpu_up(unsigned int cpu)
+static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
-	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
 	per_cpu(current_task, cpu) = idle;
@@ -562,10 +551,10 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
-	rc = native_cpu_up(cpu);
+	rc = native_cpu_up(cpu, tidle);
 	WARN_ON (xen_smp_intr_init(cpu));
 	return rc;
 }