Diffstat (limited to 'arch/ia64')
 arch/ia64/mm/numa.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index ea21d4cad540..aa19b7ac8222 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr)
  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
  * the section resides.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state)
 {
 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-	/*
-	 * NOTE: The following SMP-unsafe globals are only used early in boot
-	 * when the kernel is running single-threaded.
-	 */
-	static int __meminitdata last_ssec, last_esec;
-	static int __meminitdata last_nid;
 
-	if (section >= last_ssec && section < last_esec)
-		return last_nid;
+	if (section >= state->last_start && section < state->last_end)
+		return state->last_nid;
 
 	for (i = 0; i < num_node_memblks; i++) {
 		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
 		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
 			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
 		if (section >= ssec && section < esec) {
-			last_ssec = ssec;
-			last_esec = esec;
-			last_nid = node_memblk[i].nid;
+			state->last_start = ssec;
+			state->last_end = esec;
+			state->last_nid = node_memblk[i].nid;
 			return node_memblk[i].nid;
 		}
 	}
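
The hunk above removes the SMP-unsafe statics and instead threads the
section-to-nid lookup cache through a caller-supplied
struct mminit_pfnnid_cache. As a minimal sketch of the caller side (the
struct layout and the wrapper below belong to the generic meminit code,
not to this diff, so treat them as assumptions):

	/*
	 * Cache of the last section-range-to-nid lookup. Each caller owns
	 * its own instance, so there are no shared globals to race on.
	 */
	struct mminit_pfnnid_cache {
		unsigned long last_start;	/* first cached section */
		unsigned long last_end;		/* one past the last cached section */
		int last_nid;			/* node for that section range */
	};

	/*
	 * Boot-time wrapper: early, single-threaded init can still share
	 * one static cache instance without locking.
	 */
	static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

	int __meminit early_pfn_to_nid(unsigned long pfn)
	{
		return __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	}

Keeping the cache in the caller means that a parallel or hotplug init
path can pass a private cache of its own, rather than corrupting the
statics that the old code relied on being touched single-threaded.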