author     Tony Luck <tony.luck@intel.com>  2007-11-06 18:14:45 -0500
committer  Tony Luck <tony.luck@intel.com>  2007-11-06 18:14:45 -0500
commit     4b9ddc7cf272a0af321c22ef9c00d76384402d48 (patch)
tree       14b91c54bd8d77e8218ec5df84f5cc5cd469077e /arch/ia64
parent     4b07ae9b9d7b05a63e3ece32a666041949b7f421 (diff)
[IA64] Fix section mismatch in contig.c version of per_cpu_init()
There is a section mismatch when building CONFIG_FLATMEM=y kernels that
also have CONFIG_HOTPLUG_CPU=y:

WARNING: vmlinux.o(.text+0x5a902): Section mismatch: reference to \
  .init.text:__alloc_bootmem (between 'per_cpu_init' and 'count_pages')

The issue occurs because per_cpu_init() in mm/contig.c is marked
__cpuinit (which is #define'd to nothing on a hot plug cpu
configuration) and calls __alloc_bootmem() (which is an __init
function). The usage is actually safe because the __alloc_bootmem()
call is inside an "if (first_time)" test, so it is only made while it
is still legal to do so. But the warning is irritating. Move the
allocation to find_memory().

Signed-off-by: Tony Luck <tony.luck@intel.com>
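The shape of the warning is easy to see outside the kernel. Below is a
minimal standalone sketch, with hypothetical names and a GCC section
attribute standing in for the kernel's __init/__cpuinit markers: a
function left in plain .text makes a guarded call into .init.text,
which is exactly the reference modpost flags above.

/* Minimal sketch of the mismatch pattern; all names are hypothetical
 * and only the section placement mirrors the kernel case. */
#define __init_sketch __attribute__((section(".init.text")))

/* Stand-in for __alloc_bootmem(): placed in .init.text, which the
 * kernel discards once boot is complete. */
static void * __init_sketch early_alloc(unsigned long size)
{
	(void)size;
	return (void *)0;	/* placeholder */
}

/* Stand-in for per_cpu_init(): with CONFIG_HOTPLUG_CPU=y, __cpuinit
 * expands to nothing, so this function stays in plain .text. Its call
 * into .init.text is what modpost flags, even though the "first_time"
 * guard ensures the call only happens while .init.text still exists. */
void *setup_percpu_area(void)
{
	static int first_time = 1;
	static void *area;

	if (first_time) {	/* only taken during early boot */
		first_time = 0;
		area = early_alloc(4096);
	}
	return area;
}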
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/mm/contig.c  74
1 file changed, 41 insertions(+), 33 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index d3c538be466c..7e9c275ea148 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -146,6 +146,46 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+static void *cpu_data;
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void * __cpuinit
+per_cpu_init (void)
+{
+	int cpu;
+	static int first_time=1;
+
+	/*
+	 * get_free_pages() cannot be used before cpu_init() done.  BSP
+	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
+	 * get_zeroed_page().
+	 */
+	if (first_time) {
+		first_time=0;
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+		}
+	}
+	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+
+static inline void
+alloc_per_cpu_data(void)
+{
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+}
+#else
+#define alloc_per_cpu_data() do { } while (0)
+#endif /* CONFIG_SMP */
+
 /**
  * find_memory - setup memory map
  *
@@ -182,41 +222,9 @@ find_memory (void)
 
 	find_initrd();
 
+	alloc_per_cpu_data();
 }
 
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void * __cpuinit
-per_cpu_init (void)
-{
-	void *cpu_data;
-	int cpu;
-	static int first_time=1;
-
-	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
-	 */
-	if (first_time) {
-		first_time=0;
-		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
-					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
-	}
-	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
-
 static int
 count_pages (u64 start, u64 end, void *arg)
 {
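Why moving the call silences modpost: find_memory() is itself an __init
function, so after this patch the __alloc_bootmem() reference lives
entirely inside .init.text, and the __cpuinit per_cpu_init() merely
walks the pointer saved in the new file-scope cpu_data variable. For
reference, a paraphrase of how __cpuinit was defined in
include/linux/init.h in kernels of this era (treat the exact form as
approximate; the marker was removed from the kernel entirely some
years later):

#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit		/* hotplug: keep the code in .text after boot */
#else
#define __cpuinit	__init	/* no hotplug: discard it with .init.text */
#endif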