about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-04-04 18:23:59 -0400
committerH. Peter Anvin <hpa@zytor.com>2011-04-06 20:57:50 -0400
commit198bd06bbfde2984027e91f64c55eb19a7034a27 (patch)
tree87c611e56b58bf13ac59d52a11e7332aea70a22c /arch
parent1d85b61baf0334dd6bb88261bec42b808204d694 (diff)
x86-32, numa: Remove redundant node_remap_size[]
Remap area size can be determined from node_remap_start_vaddr[] and node_remap_end_vaddr[] making node_remap_size[] redundant. Remove it. While at it, make resume_map_numa_kva() use @nr_pages for number of pages instead of @size. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1301955840-7246-14-git-send-email-tj@kernel.org Acked-by: Yinghai Lu <yinghai@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/mm/numa_32.c10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 960ea7bc0ac7..f325e6fab75b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -104,7 +104,6 @@ extern unsigned long highend_pfn, highstart_pfn;
104 104
105#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) 105#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
106 106
107static unsigned long node_remap_size[MAX_NUMNODES];
108static void *node_remap_start_vaddr[MAX_NUMNODES]; 107static void *node_remap_start_vaddr[MAX_NUMNODES];
109void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 108void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
110 109
@@ -214,15 +213,16 @@ void resume_map_numa_kva(pgd_t *pgd_base)
214 int node; 213 int node;
215 214
216 for_each_online_node(node) { 215 for_each_online_node(node) {
217 unsigned long start_va, start_pfn, size, pfn; 216 unsigned long start_va, start_pfn, nr_pages, pfn;
218 217
219 start_va = (unsigned long)node_remap_start_vaddr[node]; 218 start_va = (unsigned long)node_remap_start_vaddr[node];
220 start_pfn = node_remap_start_pfn[node]; 219 start_pfn = node_remap_start_pfn[node];
221 size = node_remap_size[node]; 220 nr_pages = (node_remap_end_vaddr[node] -
221 node_remap_start_vaddr[node]) >> PAGE_SHIFT;
222 222
223 printk(KERN_DEBUG "%s: node %d\n", __func__, node); 223 printk(KERN_DEBUG "%s: node %d\n", __func__, node);
224 224
225 for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { 225 for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
226 unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); 226 unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
227 pgd_t *pgd = pgd_base + pgd_index(vaddr); 227 pgd_t *pgd = pgd_base + pgd_index(vaddr);
228 pud_t *pud = pud_offset(pgd, vaddr); 228 pud_t *pud = pud_offset(pgd, vaddr);
@@ -294,8 +294,6 @@ static __init void init_alloc_remap(int nid)
294 294
295 /* initialize remap allocator parameters */ 295 /* initialize remap allocator parameters */
296 node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; 296 node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
297 node_remap_size[nid] = size >> PAGE_SHIFT;
298
299 node_remap_start_vaddr[nid] = remap_va; 297 node_remap_start_vaddr[nid] = remap_va;
300 node_remap_end_vaddr[nid] = remap_va + size; 298 node_remap_end_vaddr[nid] = remap_va + size;
301 node_remap_alloc_vaddr[nid] = remap_va; 299 node_remap_alloc_vaddr[nid] = remap_va;