about summary refs log tree commit diff stats
path: root/kernel/fork.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-05-21 22:43:57 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-21 22:43:57 -0400
commitbf67f3a5c456a18f2e8d062f7e88506ef2cd9837 (patch)
tree2a2324b2572162059307db82f9238eeb25673a77 /kernel/fork.c
parent226da0dbc84ed97f448523e2a4cb91c27fa68ed9 (diff)
parent203dacbdca977bedaba61ad2fca75d934060a5d5 (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug cleanups from Thomas Gleixner: "This series is merily a cleanup of code copied around in arch/* and not changing any of the real cpu hotplug horrors yet. I wish I'd had something more substantial for 3.5, but I underestimated the lurking horror..." Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and arch/sparc/include/asm/thread_info_32.h * 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits) um: Remove leftover declaration of alloc_task_struct_node() task_allocator: Use config switches instead of magic defines sparc: Use common threadinfo allocator score: Use common threadinfo allocator sh-use-common-threadinfo-allocator mn10300: Use common threadinfo allocator powerpc: Use common threadinfo allocator mips: Use common threadinfo allocator hexagon: Use common threadinfo allocator m32r: Use common threadinfo allocator frv: Use common threadinfo allocator cris: Use common threadinfo allocator x86: Use common threadinfo allocator c6x: Use common threadinfo allocator fork: Provide kmemcache based thread_info allocator tile: Use common threadinfo allocator fork: Provide weak arch_release_[task_struct|thread_info] functions fork: Move thread info gfp flags to header fork: Remove the weak insanity sh: Remove cpu_idle_wait() ...
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 69
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 687a15d56243..9f9b296fa6df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -112,32 +112,67 @@ int nr_processes(void)
 	return total;
 }
 
-#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-# define alloc_task_struct_node(node)		\
-		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
-# define free_task_struct(tsk)			\
-		kmem_cache_free(task_struct_cachep, (tsk))
+#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 static struct kmem_cache *task_struct_cachep;
+
+static inline struct task_struct *alloc_task_struct_node(int node)
+{
+	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
+}
+
+void __weak arch_release_task_struct(struct task_struct *tsk) { }
+
+static inline void free_task_struct(struct task_struct *tsk)
+{
+	arch_release_task_struct(tsk);
+	kmem_cache_free(task_struct_cachep, tsk);
+}
 #endif
 
-#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+void __weak arch_release_thread_info(struct thread_info *ti) { }
+
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+# if THREAD_SIZE >= PAGE_SIZE
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 						  int node)
 {
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
-#else
-	gfp_t mask = GFP_KERNEL;
-#endif
-	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
+	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+					     THREAD_SIZE_ORDER);
 
 	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
+	arch_release_thread_info(ti);
 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
+# else
+static struct kmem_cache *thread_info_cache;
+
+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+						  int node)
+{
+	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+}
+
+static void free_thread_info(struct thread_info *ti)
+{
+	arch_release_thread_info(ti);
+	kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+					      THREAD_SIZE, 0, NULL);
+	BUG_ON(thread_info_cache == NULL);
+}
+# endif
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
@@ -204,17 +239,11 @@ void __put_task_struct(struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
 
-/*
- * macro override instead of weak attribute alias, to workaround
- * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
- */
-#ifndef arch_task_cache_init
-#define arch_task_cache_init()
-#endif
+void __init __weak arch_task_cache_init(void) { }
 
 void __init fork_init(unsigned long mempages)
 {
-#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
 #ifndef ARCH_MIN_TASKALIGN
 #define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
 #endif