author     Tejun Heo <tj@kernel.org>        2011-04-04 18:23:58 -0400
committer  H. Peter Anvin <hpa@zytor.com>   2011-04-06 20:57:44 -0400
commit     1d85b61baf0334dd6bb88261bec42b808204d694 (patch)
tree       adbd8ea60cb19c5137b5691e028c4df8e1b8a6fb /arch/x86/mm
parent     b2e3e4fa3eee752b893687783f2a427106c93423 (diff)

x86-32, numa: Remove now useless node_remap_offset[]

With lowmem address reservation moved into init_alloc_remap(),
node_remap_offset[] is no longer useful.  Remove it and the related
offset handling code.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-13-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
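As a rough, self-contained sketch of the calling-convention change described above (simplified stand-in types and values, not the actual numa_32.c code): before this patch, init_alloc_remap() returned the number of remap pages it set up and took the running total as an offset parameter, which the caller in initmem_init() accumulated into reserve_pages; with the lowmem reservation now done inside init_alloc_remap() itself, the function only needs the node id and returns nothing.

#include <stdio.h>

#define MAX_NUMNODES 4

static unsigned long node_remap_size[MAX_NUMNODES];

/* Old shape: report the remap size so the caller can keep a running
 * offset and hand it back in on the next call. */
static unsigned long init_alloc_remap_old(int nid, unsigned long offset)
{
        unsigned long pages = 16;       /* stand-in for the computed remap size */

        (void)offset;                   /* only the removed node_remap_offset[]
                                           bookkeeping consumed this */
        node_remap_size[nid] = pages;
        return pages;
}

/* New shape: each node reserves its own lowmem range internally, so there
 * is nothing to return and no offset to thread through. */
static void init_alloc_remap_new(int nid)
{
        node_remap_size[nid] = 16;      /* stand-in for the computed remap size */
}

int main(void)
{
        unsigned long reserve_pages = 0;
        int nid;

        /* Old caller loop in initmem_init(): accumulate the per-node sizes. */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                reserve_pages += init_alloc_remap_old(nid, reserve_pages);
        printf("old convention accumulated %lu pages\n", reserve_pages);

        /* New caller loop: no running total to maintain. */
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                init_alloc_remap_new(nid);
        return 0;
}

The per-node size is still recorded in node_remap_size[], as in the real code; only the cross-node offset bookkeeping goes away.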
Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/numa_32.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0184a9f5a345..960ea7bc0ac7 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -187,7 +187,6 @@ static void __init allocate_pgdat(int nid)
 static unsigned long node_remap_start_pfn[MAX_NUMNODES];
 static void *node_remap_end_vaddr[MAX_NUMNODES];
 static void *node_remap_alloc_vaddr[MAX_NUMNODES];
-static unsigned long node_remap_offset[MAX_NUMNODES];
 
 void *alloc_remap(int nid, unsigned long size)
 {
@@ -239,7 +238,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
 
-static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
+static __init void init_alloc_remap(int nid)
 {
         unsigned long size, pfn;
         u64 node_pa, remap_pa;
@@ -252,9 +251,9 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
         printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
                nid, node_start_pfn[nid], node_end_pfn[nid]);
         if (node_start_pfn[nid] > max_pfn)
-                return 0;
+                return;
         if (!node_end_pfn[nid])
-                return 0;
+                return;
         if (node_end_pfn[nid] > max_pfn)
                 node_end_pfn[nid] = max_pfn;
 
@@ -271,7 +270,7 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
         if (node_pa == MEMBLOCK_ERROR) {
                 pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
                            size, nid);
-                return 0;
+                return;
         }
         memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
 
@@ -282,7 +281,7 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
                 pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
                            size, nid);
                 memblock_x86_free_range(node_pa, node_pa + size);
-                return 0;
+                return;
         }
         memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
         remap_va = phys_to_virt(remap_pa);
@@ -296,7 +295,6 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
         /* initialize remap allocator parameters */
         node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
         node_remap_size[nid] = size >> PAGE_SHIFT;
-        node_remap_offset[nid] = offset;
 
         node_remap_start_vaddr[nid] = remap_va;
         node_remap_end_vaddr[nid] = remap_va + size;
@@ -304,13 +302,10 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 
         printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
                nid, node_pa, node_pa + size, remap_va, remap_va + size);
-
-        return size >> PAGE_SHIFT;
 }
 
 void __init initmem_init(void)
 {
-        unsigned long reserve_pages = 0;
         int nid;
 
         /*
@@ -325,7 +320,7 @@ void __init initmem_init(void)
         numa_init_array();
 
         for_each_online_node(nid)
-                reserve_pages += init_alloc_remap(nid, reserve_pages);
+                init_alloc_remap(nid);
 
 #ifdef CONFIG_HIGHMEM
         highstart_pfn = highend_pfn = max_pfn;