author	Mel Gorman <mgorman@suse.de>	2015-06-30 17:57:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-30 22:44:56 -0400
commit	54608c3f3a448f1e042f5d9f3b873cc8dc022f27 (patch)
tree	410305a0ade6719f350a8c5f2456cec85c4e462a /mm
parent	7e18adb4f80bea90d30b62158694d97c31f71d37 (diff)
mm: meminit: minimise number of pfn->page lookups during initialisation
Deferred struct page initialisation is using pfn_to_page() on every PFN unnecessarily. This patch minimises the number of lookups and scheduler checks.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
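The idea behind the patch: within a naturally aligned block of MAX_ORDER_NR_PAGES pfns, the corresponding struct pages are contiguous in the memmap, so the pointer for the next pfn can be obtained by incrementing a cached pointer instead of calling pfn_to_page() again, and cond_resched() only needs to run once per block rather than once per page. The standalone user-space C sketch below merely models that pattern; fake_page, memmap, lookup_page(), NPAGES and BLOCK_SIZE are illustrative stand-ins (not kernel identifiers) for struct page, the memmap, pfn_to_page() and MAX_ORDER_NR_PAGES, and the program is an assumption-laden model of the loop shown in the diff further down, not kernel code.

#include <stdio.h>

/* Illustrative stand-ins: BLOCK_SIZE plays the role of MAX_ORDER_NR_PAGES,
 * struct fake_page the role of struct page, and lookup_page() the role of
 * the comparatively expensive pfn_to_page() translation. */
#define NPAGES		64
#define BLOCK_SIZE	16

struct fake_page { unsigned long flags; };

static struct fake_page memmap[NPAGES];
static unsigned long lookups;	/* counts how often the slow path runs */

static struct fake_page *lookup_page(unsigned long pfn)
{
	lookups++;
	return &memmap[pfn];
}

int main(void)
{
	struct fake_page *page = NULL;
	unsigned long pfn;

	for (pfn = 0; pfn < NPAGES; pfn++) {
		/*
		 * Within an aligned block the backing array is contiguous,
		 * so reuse the cached pointer; only re-derive it (and, in
		 * the kernel, call cond_resched()) at block boundaries.
		 */
		if (page && (pfn & (BLOCK_SIZE - 1)) != 0)
			page++;
		else
			page = lookup_page(pfn);

		page->flags = pfn;	/* stand-in for __init_single_page() */
	}

	printf("%d pages initialised with %lu slow lookups\n", NPAGES, lookups);
	return 0;
}

Compiled and run, the sketch reports 64 pages initialised with only 4 slow lookups, which is the effect the patch is after: the expensive translation and the scheduler check drop from once per page to once per aligned block.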
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	29
1 files changed, 24 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c30f5a0535fd..0f770cc13450 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1091,6 +1091,7 @@ void __defermem_init deferred_init_memmap(int nid)
 
 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
 		unsigned long pfn, end_pfn;
+		struct page *page = NULL;
 
 		end_pfn = min(walk_end, zone_end_pfn(zone));
 		pfn = first_init_pfn;
@@ -1100,13 +1101,32 @@ void __defermem_init deferred_init_memmap(int nid)
 			pfn = zone->zone_start_pfn;
 
 		for (; pfn < end_pfn; pfn++) {
-			struct page *page;
-
-			if (!pfn_valid(pfn))
+			if (!pfn_valid_within(pfn))
 				continue;
 
-			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state))
+			/*
+			 * Ensure pfn_valid is checked every
+			 * MAX_ORDER_NR_PAGES for memory holes
+			 */
+			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+				if (!pfn_valid(pfn)) {
+					page = NULL;
+					continue;
+				}
+			}
+
+			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+				page = NULL;
 				continue;
+			}
+
+			/* Minimise pfn page lookups and scheduler checks */
+			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+				page++;
+			} else {
+				page = pfn_to_page(pfn);
+				cond_resched();
+			}
 
 			if (page->flags) {
 				VM_BUG_ON(page_zone(page) != zone);
@@ -1116,7 +1136,6 @@ void __defermem_init deferred_init_memmap(int nid)
 			__init_single_page(page, pfn, zid, nid);
 			__free_pages_boot_core(page, pfn, 0);
 			nr_pages++;
-			cond_resched();
 		}
 		first_init_pfn = max(end_pfn, first_init_pfn);
 	}