author	Mel Gorman <mgorman@suse.de>	2015-06-30 17:56:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-30 22:44:56 -0400
commit	8a942fdea560d4ac0e9d9fabcd5201ad20e0c382 (patch)
tree	452c0f5acccf96ffc54748a44ab5bc788f7dbffc /arch/ia64/mm
parent	d70ddd7a5d9aa335f9b4b0c3d879e1e70ee1e4e3 (diff)
mm: meminit: make __early_pfn_to_nid SMP-safe and introduce meminit_pfn_in_nid
__early_pfn_to_nid() uses static variables to cache recent lookups, as memblock lookups are very expensive, but it assumes that memory initialisation is single-threaded. Parallel initialisation of struct pages will break that assumption, so this patch makes __early_pfn_to_nid() SMP-safe by requiring the caller to cache recent search information. early_pfn_to_nid() keeps the same interface but is only safe to use early in boot due to its use of a global static variable. meminit_pfn_in_nid() is an SMP-safe version for which callers must maintain their own state.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
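[Editor's note] The caller-side pattern this enables looks roughly like the sketch below. It is illustrative only: init_pfn_range() and its loop body are hypothetical, while meminit_pfn_in_nid() and struct mminit_pfnnid_cache are the interfaces introduced by this patch series.

	#include <linux/init.h>
	#include <linux/mmzone.h>

	/* Hypothetical per-thread initialiser: each caller owns its own
	 * lookup cache, so no SMP-unsafe function-local statics are needed. */
	static void __meminit init_pfn_range(unsigned long start_pfn,
					     unsigned long end_pfn, int nid)
	{
		struct mminit_pfnnid_cache nid_cache = { };	/* caller-maintained state */
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			/* Skip pfns that memblock attributes to another node. */
			if (!meminit_pfn_in_nid(pfn, nid, &nid_cache))
				continue;
			/* ... initialise the struct page for pfn ... */
		}
	}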
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/numa.c	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index ea21d4cad540..aa19b7ac8222 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr)
  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
  * the section resides.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state)
 {
 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-	/*
-	 * NOTE: The following SMP-unsafe globals are only used early in boot
-	 * when the kernel is running single-threaded.
-	 */
-	static int __meminitdata last_ssec, last_esec;
-	static int __meminitdata last_nid;
 
-	if (section >= last_ssec && section < last_esec)
-		return last_nid;
+	if (section >= state->last_start && section < state->last_end)
+		return state->last_nid;
 
 	for (i = 0; i < num_node_memblks; i++) {
 		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
 		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
 			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
 		if (section >= ssec && section < esec) {
-			last_ssec = ssec;
-			last_esec = esec;
-			last_nid = node_memblk[i].nid;
+			state->last_start = ssec;
+			state->last_end = esec;
+			state->last_nid = node_memblk[i].nid;
 			return node_memblk[i].nid;
 		}
 	}
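[Editor's note] The commit message's remark that early_pfn_to_nid() "keeps the same interface" corresponds to a wrapper of roughly the following shape. This is reconstructed from the description, not part of this ia64 hunk; the cache name and the node-0 fallback are assumptions.

	/* Boot-time wrapper: one global cache, safe only while the kernel
	 * is still running single-threaded early in boot. */
	int __meminit early_pfn_to_nid(unsigned long pfn)
	{
		static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
		int nid;

		nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
		if (nid < 0)	/* pfn not covered by any memblock: assume node 0 */
			nid = 0;
		return nid;
	}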