path: root/arch/ia64/mm/discontig.c
author	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit	ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree	644b88f8a71896307d71438e9b3af49126ffb22b	/arch/ia64/mm/discontig.c
parent	43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent	3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/ia64/mm/discontig.c')
-rw-r--r--	arch/ia64/mm/discontig.c	130
1 file changed, 115 insertions(+), 15 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..61620323bb60 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -22,6 +22,7 @@
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
+#include <linux/slab.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
@@ -143,22 +144,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units = 1;
+		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map = &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +451,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +460,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +766,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
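
As an aside for readers tracing the new setup_per_cpu_areas() above: the sketch below is a small standalone C program, not kernel code, that reproduces only the grouping step, building a cpu_map ordered by node and starting a new group at each node boundary. The cpu-to-node table, array sizes, and variable names are made-up example values standing in for node_cpuid[] and the pcpu_group_info bookkeeping; the real function goes on to hand the layout to pcpu_setup_first_chunk(), which is not modelled here.

/*
 * Standalone illustration (userspace, hypothetical data): group CPUs by
 * NUMA node the same way setup_per_cpu_areas() builds cpu_map and splits
 * it into groups at node boundaries.
 */
#include <stdio.h>

#define NR_CPUS  8
#define NR_NODES 3

/* made-up cpu -> node mapping, stand-in for node_cpuid[cpu].nid */
static const int cpu_node[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 0, 1 };

int main(void)
{
	int cpu_map[NR_CPUS];
	int unit = 0, node, cpu;

	/* build cpu_map: units are grouped by node, like the kernel loop */
	for (node = 0; node < NR_NODES; node++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_node[cpu] == node)
				cpu_map[unit++] = cpu;

	/* walk cpu_map and start a new group at every node boundary */
	int prev_node = -1, nr_groups = 0;
	int group_first_unit[NR_NODES], group_nr_units[NR_NODES];

	for (int u = 0; u < unit; u++) {
		node = cpu_node[cpu_map[u]];
		if (node == prev_node) {
			group_nr_units[nr_groups - 1]++;
			continue;
		}
		prev_node = node;
		group_first_unit[nr_groups] = u;
		group_nr_units[nr_groups] = 1;
		nr_groups++;
	}

	/* print the resulting layout: one group per node, contiguous units */
	for (int g = 0; g < nr_groups; g++) {
		printf("group %d: units %d..%d, cpus:", g,
		       group_first_unit[g],
		       group_first_unit[g] + group_nr_units[g] - 1);
		for (int u = group_first_unit[g];
		     u < group_first_unit[g] + group_nr_units[g]; u++)
			printf(" %d", cpu_map[u]);
		printf("\n");
	}
	return 0;
}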