aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichal Nazarewicz <mina86@mina86.com>2012-01-30 07:24:03 -0500
committerMarek Szyprowski <m.szyprowski@samsung.com>2012-05-21 09:09:29 -0400
commit85aa125f001f87f96a72e9e6ee515490843b1202 (patch)
tree4191169ebd6fa2a4463f9dd04fd3cb4af41ca814
parent03d44192f69a45d780ba124f691e76020a44ebae (diff)
mm: compaction: introduce isolate_freepages_range()
This commit introduces isolate_freepages_range() function which generalises isolate_freepages_block() so that it can be used on arbitrary PFN ranges. isolate_freepages_block() is left with only minor changes. Signed-off-by: Michal Nazarewicz <mina86@mina86.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Acked-by: Mel Gorman <mel@csn.ul.ie> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Tested-by: Rob Clark <rob.clark@linaro.org> Tested-by: Ohad Ben-Cohen <ohad@wizery.com> Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org> Tested-by: Robert Nelson <robertcnelson@gmail.com> Tested-by: Barry Song <Baohua.Song@csr.com>
-rw-r--r--mm/compaction.c111
1 files changed, 93 insertions, 18 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index d9d7b35d3933..06b198fa9abe 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -54,24 +54,20 @@ static unsigned long release_freepages(struct list_head *freelist)
54 return count; 54 return count;
55} 55}
56 56
57/* Isolate free pages onto a private freelist. Must hold zone->lock */ 57/*
58static unsigned long isolate_freepages_block(struct zone *zone, 58 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
59 unsigned long blockpfn, 59 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
60 struct list_head *freelist) 60 * pages inside of the pageblock (even though it may still end up isolating
61 * some pages).
62 */
63static unsigned long isolate_freepages_block(unsigned long blockpfn,
64 unsigned long end_pfn,
65 struct list_head *freelist,
66 bool strict)
61{ 67{
62 unsigned long zone_end_pfn, end_pfn;
63 int nr_scanned = 0, total_isolated = 0; 68 int nr_scanned = 0, total_isolated = 0;
64 struct page *cursor; 69 struct page *cursor;
65 70
66 /* Get the last PFN we should scan for free pages at */
67 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
68 end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
69
70 /* Find the first usable PFN in the block to initialise page cursor */
71 for (; blockpfn < end_pfn; blockpfn++) {
72 if (pfn_valid_within(blockpfn))
73 break;
74 }
75 cursor = pfn_to_page(blockpfn); 71 cursor = pfn_to_page(blockpfn);
76 72
77 /* Isolate free pages. This assumes the block is valid */ 73 /* Isolate free pages. This assumes the block is valid */
@@ -79,15 +75,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
79 int isolated, i; 75 int isolated, i;
80 struct page *page = cursor; 76 struct page *page = cursor;
81 77
82 if (!pfn_valid_within(blockpfn)) 78 if (!pfn_valid_within(blockpfn)) {
79 if (strict)
80 return 0;
83 continue; 81 continue;
82 }
84 nr_scanned++; 83 nr_scanned++;
85 84
86 if (!PageBuddy(page)) 85 if (!PageBuddy(page)) {
86 if (strict)
87 return 0;
87 continue; 88 continue;
89 }
88 90
89 /* Found a free page, break it into order-0 pages */ 91 /* Found a free page, break it into order-0 pages */
90 isolated = split_free_page(page); 92 isolated = split_free_page(page);
93 if (!isolated && strict)
94 return 0;
91 total_isolated += isolated; 95 total_isolated += isolated;
92 for (i = 0; i < isolated; i++) { 96 for (i = 0; i < isolated; i++) {
93 list_add(&page->lru, freelist); 97 list_add(&page->lru, freelist);
@@ -105,6 +109,73 @@ static unsigned long isolate_freepages_block(struct zone *zone,
105 return total_isolated; 109 return total_isolated;
106} 110}
107 111
112/**
113 * isolate_freepages_range() - isolate free pages.
114 * @start_pfn: The first PFN to start isolating.
115 * @end_pfn: The one-past-last PFN.
116 *
117 * Non-free pages, invalid PFNs, or zone boundaries within the
118 * [start_pfn, end_pfn) range are considered errors, cause function to
119 * undo its actions and return zero.
120 *
121 * Otherwise, function returns one-past-the-last PFN of isolated page
122 * (which may be greater then end_pfn if end fell in a middle of
123 * a free page).
124 */
125static unsigned long
126isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
127{
128 unsigned long isolated, pfn, block_end_pfn, flags;
129 struct zone *zone = NULL;
130 LIST_HEAD(freelist);
131
132 if (pfn_valid(start_pfn))
133 zone = page_zone(pfn_to_page(start_pfn));
134
135 for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
136 if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
137 break;
138
139 /*
140 * On subsequent iterations ALIGN() is actually not needed,
141 * but we keep it that we not to complicate the code.
142 */
143 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
144 block_end_pfn = min(block_end_pfn, end_pfn);
145
146 spin_lock_irqsave(&zone->lock, flags);
147 isolated = isolate_freepages_block(pfn, block_end_pfn,
148 &freelist, true);
149 spin_unlock_irqrestore(&zone->lock, flags);
150
151 /*
152 * In strict mode, isolate_freepages_block() returns 0 if
153 * there are any holes in the block (ie. invalid PFNs or
154 * non-free pages).
155 */
156 if (!isolated)
157 break;
158
159 /*
160 * If we managed to isolate pages, it is always (1 << n) *
161 * pageblock_nr_pages for some non-negative n. (Max order
162 * page may span two pageblocks).
163 */
164 }
165
166 /* split_free_page does not map the pages */
167 map_pages(&freelist);
168
169 if (pfn < end_pfn) {
170 /* Loop terminated early, cleanup. */
171 release_freepages(&freelist);
172 return 0;
173 }
174
175 /* We don't use freelists for anything. */
176 return pfn;
177}
178
108/* Returns true if the page is within a block suitable for migration to */ 179/* Returns true if the page is within a block suitable for migration to */
109static bool suitable_migration_target(struct page *page) 180static bool suitable_migration_target(struct page *page)
110{ 181{
@@ -145,7 +216,7 @@ static void isolate_freepages(struct zone *zone,
145 struct compact_control *cc) 216 struct compact_control *cc)
146{ 217{
147 struct page *page; 218 struct page *page;
148 unsigned long high_pfn, low_pfn, pfn; 219 unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
149 unsigned long flags; 220 unsigned long flags;
150 int nr_freepages = cc->nr_freepages; 221 int nr_freepages = cc->nr_freepages;
151 struct list_head *freelist = &cc->freepages; 222 struct list_head *freelist = &cc->freepages;
@@ -165,6 +236,8 @@ static void isolate_freepages(struct zone *zone,
165 */ 236 */
166 high_pfn = min(low_pfn, pfn); 237 high_pfn = min(low_pfn, pfn);
167 238
239 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
240
168 /* 241 /*
169 * Isolate free pages until enough are available to migrate the 242 * Isolate free pages until enough are available to migrate the
170 * pages on cc->migratepages. We stop searching if the migrate 243 * pages on cc->migratepages. We stop searching if the migrate
@@ -201,7 +274,9 @@ static void isolate_freepages(struct zone *zone,
201 isolated = 0; 274 isolated = 0;
202 spin_lock_irqsave(&zone->lock, flags); 275 spin_lock_irqsave(&zone->lock, flags);
203 if (suitable_migration_target(page)) { 276 if (suitable_migration_target(page)) {
204 isolated = isolate_freepages_block(zone, pfn, freelist); 277 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
278 isolated = isolate_freepages_block(pfn, end_pfn,
279 freelist, false);
205 nr_freepages += isolated; 280 nr_freepages += isolated;
206 } 281 }
207 spin_unlock_irqrestore(&zone->lock, flags); 282 spin_unlock_irqrestore(&zone->lock, flags);