diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-21 22:43:57 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-21 22:43:57 -0400 |
commit | bf67f3a5c456a18f2e8d062f7e88506ef2cd9837 (patch) | |
tree | 2a2324b2572162059307db82f9238eeb25673a77 /arch/ia64 | |
parent | 226da0dbc84ed97f448523e2a4cb91c27fa68ed9 (diff) | |
parent | 203dacbdca977bedaba61ad2fca75d934060a5d5 (diff) |
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug cleanups from Thomas Gleixner:
"This series is merely a cleanup of code copied around in arch/* and
not changing any of the real cpu hotplug horrors yet. I wish I'd had
something more substantial for 3.5, but I underestimated the lurking
horror..."
Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
um: Remove leftover declaration of alloc_task_struct_node()
task_allocator: Use config switches instead of magic defines
sparc: Use common threadinfo allocator
score: Use common threadinfo allocator
sh-use-common-threadinfo-allocator
mn10300: Use common threadinfo allocator
powerpc: Use common threadinfo allocator
mips: Use common threadinfo allocator
hexagon: Use common threadinfo allocator
m32r: Use common threadinfo allocator
frv: Use common threadinfo allocator
cris: Use common threadinfo allocator
x86: Use common threadinfo allocator
c6x: Use common threadinfo allocator
fork: Provide kmemcache based thread_info allocator
tile: Use common threadinfo allocator
fork: Provide weak arch_release_[task_struct|thread_info] functions
fork: Move thread info gfp flags to header
fork: Remove the weak insanity
sh: Remove cpu_idle_wait()
...
Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig | 4 | ||||
-rw-r--r-- | arch/ia64/include/asm/processor.h | 1 | ||||
-rw-r--r-- | arch/ia64/include/asm/thread_info.h | 3 | ||||
-rw-r--r-- | arch/ia64/kernel/process.c | 20 | ||||
-rw-r--r-- | arch/ia64/kernel/smpboot.c | 63 |
5 files changed, 8 insertions, 83 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index bd7266903bf8..ba667b60f32d 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -33,6 +33,10 @@ config IA64 | |||
33 | select ARCH_WANT_OPTIONAL_GPIOLIB | 33 | select ARCH_WANT_OPTIONAL_GPIOLIB |
34 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 34 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
35 | select GENERIC_IOMAP | 35 | select GENERIC_IOMAP |
36 | select GENERIC_SMP_IDLE_THREAD | ||
37 | select ARCH_INIT_TASK | ||
38 | select ARCH_TASK_STRUCT_ALLOCATOR | ||
39 | select ARCH_THREAD_INFO_ALLOCATOR | ||
36 | default y | 40 | default y |
37 | help | 41 | help |
38 | The Itanium Processor Family is Intel's 64-bit successor to | 42 | The Itanium Processor Family is Intel's 64-bit successor to |
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 483f6c6a4238..f92f67aba618 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h | |||
@@ -723,7 +723,6 @@ extern unsigned long boot_option_idle_override; | |||
723 | enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, | 723 | enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, |
724 | IDLE_NOMWAIT, IDLE_POLL}; | 724 | IDLE_NOMWAIT, IDLE_POLL}; |
725 | 725 | ||
726 | void cpu_idle_wait(void); | ||
727 | void default_idle(void); | 726 | void default_idle(void); |
728 | 727 | ||
729 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) | 728 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) |
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index e054bcc4273c..310d9734f02d 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h | |||
@@ -54,8 +54,6 @@ struct thread_info { | |||
54 | }, \ | 54 | }, \ |
55 | } | 55 | } |
56 | 56 | ||
57 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
58 | |||
59 | #ifndef ASM_OFFSETS_C | 57 | #ifndef ASM_OFFSETS_C |
60 | /* how to get the thread information struct from C */ | 58 | /* how to get the thread information struct from C */ |
61 | #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) | 59 | #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) |
@@ -84,7 +82,6 @@ struct thread_info { | |||
84 | #endif | 82 | #endif |
85 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) | 83 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) |
86 | 84 | ||
87 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR | ||
88 | #define alloc_task_struct_node(node) \ | 85 | #define alloc_task_struct_node(node) \ |
89 | ({ \ | 86 | ({ \ |
90 | struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \ | 87 | struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \ |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index ce74e143aea3..5e0e86ddb12f 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -273,26 +273,6 @@ static inline void play_dead(void) | |||
273 | } | 273 | } |
274 | #endif /* CONFIG_HOTPLUG_CPU */ | 274 | #endif /* CONFIG_HOTPLUG_CPU */ |
275 | 275 | ||
276 | static void do_nothing(void *unused) | ||
277 | { | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | ||
282 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | ||
283 | * handler on SMP systems. | ||
284 | * | ||
285 | * Caller must have changed pm_idle to the new value before the call. Old | ||
286 | * pm_idle value will not be used by any CPU after the return of this function. | ||
287 | */ | ||
288 | void cpu_idle_wait(void) | ||
289 | { | ||
290 | smp_mb(); | ||
291 | /* kick all the CPUs so that they exit out of pm_idle */ | ||
292 | smp_call_function(do_nothing, NULL, 1); | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
295 | |||
296 | void __attribute__((noreturn)) | 276 | void __attribute__((noreturn)) |
297 | cpu_idle (void) | 277 | cpu_idle (void) |
298 | { | 278 | { |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 796f6a5b966a..1113b8aba07f 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -75,13 +75,6 @@ | |||
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * Store all idle threads, this can be reused instead of creating | ||
79 | * a new thread. Also avoids complicated thread destroy functionality | ||
80 | * for idle threads. | ||
81 | */ | ||
82 | struct task_struct *idle_thread_array[NR_CPUS]; | ||
83 | |||
84 | /* | ||
85 | * Global array allocated for NR_CPUS at boot time | 78 | * Global array allocated for NR_CPUS at boot time |
86 | */ | 79 | */ |
87 | struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; | 80 | struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; |
@@ -94,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; | |||
94 | 87 | ||
95 | #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); | 88 | #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); |
96 | 89 | ||
97 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
98 | #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) | ||
99 | |||
100 | #else | 90 | #else |
101 | |||
102 | #define get_idle_for_cpu(x) (NULL) | ||
103 | #define set_idle_for_cpu(x,p) | ||
104 | #define set_brendez_area(x) | 91 | #define set_brendez_area(x) |
105 | #endif | 92 | #endif |
106 | 93 | ||
@@ -480,54 +467,12 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | |||
480 | return NULL; | 467 | return NULL; |
481 | } | 468 | } |
482 | 469 | ||
483 | struct create_idle { | ||
484 | struct work_struct work; | ||
485 | struct task_struct *idle; | ||
486 | struct completion done; | ||
487 | int cpu; | ||
488 | }; | ||
489 | |||
490 | void __cpuinit | ||
491 | do_fork_idle(struct work_struct *work) | ||
492 | { | ||
493 | struct create_idle *c_idle = | ||
494 | container_of(work, struct create_idle, work); | ||
495 | |||
496 | c_idle->idle = fork_idle(c_idle->cpu); | ||
497 | complete(&c_idle->done); | ||
498 | } | ||
499 | |||
500 | static int __cpuinit | 470 | static int __cpuinit |
501 | do_boot_cpu (int sapicid, int cpu) | 471 | do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) |
502 | { | 472 | { |
503 | int timeout; | 473 | int timeout; |
504 | struct create_idle c_idle = { | ||
505 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), | ||
506 | .cpu = cpu, | ||
507 | .done = COMPLETION_INITIALIZER(c_idle.done), | ||
508 | }; | ||
509 | |||
510 | /* | ||
511 | * We can't use kernel_thread since we must avoid to | ||
512 | * reschedule the child. | ||
513 | */ | ||
514 | c_idle.idle = get_idle_for_cpu(cpu); | ||
515 | if (c_idle.idle) { | ||
516 | init_idle(c_idle.idle, cpu); | ||
517 | goto do_rest; | ||
518 | } | ||
519 | |||
520 | schedule_work(&c_idle.work); | ||
521 | wait_for_completion(&c_idle.done); | ||
522 | |||
523 | if (IS_ERR(c_idle.idle)) | ||
524 | panic("failed fork for CPU %d", cpu); | ||
525 | |||
526 | set_idle_for_cpu(cpu, c_idle.idle); | ||
527 | |||
528 | do_rest: | ||
529 | task_for_booting_cpu = c_idle.idle; | ||
530 | 474 | ||
475 | task_for_booting_cpu = idle; | ||
531 | Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); | 476 | Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); |
532 | 477 | ||
533 | set_brendez_area(cpu); | 478 | set_brendez_area(cpu); |
@@ -793,7 +738,7 @@ set_cpu_sibling_map(int cpu) | |||
793 | } | 738 | } |
794 | 739 | ||
795 | int __cpuinit | 740 | int __cpuinit |
796 | __cpu_up (unsigned int cpu) | 741 | __cpu_up(unsigned int cpu, struct task_struct *tidle) |
797 | { | 742 | { |
798 | int ret; | 743 | int ret; |
799 | int sapicid; | 744 | int sapicid; |
@@ -811,7 +756,7 @@ __cpu_up (unsigned int cpu) | |||
811 | 756 | ||
812 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | 757 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
813 | /* Processor goes to start_secondary(), sets online flag */ | 758 | /* Processor goes to start_secondary(), sets online flag */ |
814 | ret = do_boot_cpu(sapicid, cpu); | 759 | ret = do_boot_cpu(sapicid, cpu, tidle); |
815 | if (ret < 0) | 760 | if (ret < 0) |
816 | return ret; | 761 | return ret; |
817 | 762 | ||