author	Michal Nazarewicz <mina86@mina86.com>	2011-12-29 07:09:50 -0500
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:09:30 -0400
commit	ff9543fd32060917beb080b1eb2d1d41ec7f39e0 (patch)
tree	31d2ff2026353945cc6f574148c54b4d53167a3b /mm
parent	85aa125f001f87f96a72e9e6ee515490843b1202 (diff)
mm: compaction: export some of the functions
This commit exports some of the functions from the compaction.c file
outside of it, adding their declarations to the internal.h header file so
that other mm-related code can use them.

This forces compaction.c to always be compiled (as opposed to being
compiled only if CONFIG_COMPACTION is defined), but to avoid introducing
code that the user did not ask for, part of compaction.c is now wrapped in
an #ifdef.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
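To illustrate what the export enables, here is a minimal sketch of a
caller elsewhere in mm/ — hypothetical, not part of this commit; only
isolate_freepages_range() and its declaration in internal.h come from
this patch:

	/* Hypothetical caller in another mm/ file; illustrative only. */
	#include "internal.h"

	static unsigned long example_take_free_range(unsigned long start_pfn,
						     unsigned long end_pfn)
	{
		/*
		 * isolate_freepages_range() returns 0 if it hit an invalid
		 * or non-free page, otherwise a PFN past the last isolated
		 * page (possibly beyond end_pfn if the range ended in the
		 * middle of a free page).
		 */
		return isolate_freepages_range(start_pfn, end_pfn);
	}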
Diffstat (limited to 'mm')
-rw-r--r--	mm/Makefile	  3
-rw-r--r--	mm/compaction.c	328
-rw-r--r--	mm/internal.h	 33
3 files changed, 191 insertions(+), 173 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00ef2a0e..8aada89efbbb 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,7 +13,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			readahead.o swap.o truncate.o vmscan.o shmem.o \
 			prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
 			page_isolation.o mm_init.o mmu_context.o percpu.o \
-			$(mmu-y)
+			compaction.o $(mmu-y)
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
@@ -32,7 +32,6 @@ obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
 obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_KSM) += ksm.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 06b198fa9abe..7a92e418a187 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/compaction.h>
 
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
-	struct list_head freepages;	/* List of free pages to migrate to */
-	struct list_head migratepages;	/* List of pages being migrated */
-	unsigned long nr_freepages;	/* Number of isolated free pages */
-	unsigned long nr_migratepages;	/* Number of pages to migrate */
-	unsigned long free_pfn;		/* isolate_freepages search base */
-	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
-
-	int order;			/* order a direct compactor needs */
-	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
-	struct zone *zone;
-};
-
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
@@ -54,6 +35,16 @@ static unsigned long release_freepages(struct list_head *freelist)
 	return count;
 }
 
+static void map_pages(struct list_head *list)
+{
+	struct page *page;
+
+	list_for_each_entry(page, list, lru) {
+		arch_alloc_page(page, 0);
+		kernel_map_pages(page, 1, 1);
+	}
+}
+
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -122,7 +113,7 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
  * (which may be greater then end_pfn if end fell in a middle of
  * a free page).
  */
-static unsigned long
+unsigned long
 isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long isolated, pfn, block_end_pfn, flags;
@@ -176,127 +167,6 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 	return pfn;
 }
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
-static void map_pages(struct list_head *list)
-{
-	struct page *page;
-
-	list_for_each_entry(page, list, lru) {
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
-	}
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
- */
-static void isolate_freepages(struct zone *zone,
-				struct compact_control *cc)
-{
-	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
-	unsigned long flags;
-	int nr_freepages = cc->nr_freepages;
-	struct list_head *freelist = &cc->freepages;
-
-	/*
-	 * Initialise the free scanner. The starting point is where we last
-	 * scanned from (or the end of the zone if starting). The low point
-	 * is the end of the pageblock the migration scanner is using.
-	 */
-	pfn = cc->free_pfn;
-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
-
-	/*
-	 * Take care that if the migration scanner is at the end of the zone
-	 * that the free scanner does not accidentally move to the next zone
-	 * in the next isolation cycle.
-	 */
-	high_pfn = min(low_pfn, pfn);
-
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-
-	/*
-	 * Isolate free pages until enough are available to migrate the
-	 * pages on cc->migratepages. We stop searching if the migrate
-	 * and free page scanners meet or enough free pages are isolated.
-	 */
-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
-					pfn -= pageblock_nr_pages) {
-		unsigned long isolated;
-
-		if (!pfn_valid(pfn))
-			continue;
-
-		/*
-		 * Check for overlapping nodes/zones. It's possible on some
-		 * configurations to have a setup like
-		 * node0 node1 node0
-		 * i.e. it's possible that all pages within a zones range of
-		 * pages do not belong to a single zone.
-		 */
-		page = pfn_to_page(pfn);
-		if (page_zone(page) != zone)
-			continue;
-
-		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(page))
-			continue;
-
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
-		isolated = 0;
-		spin_lock_irqsave(&zone->lock, flags);
-		if (suitable_migration_target(page)) {
-			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
-			isolated = isolate_freepages_block(pfn, end_pfn,
-							   freelist, false);
-			nr_freepages += isolated;
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
-
-		/*
-		 * Record the highest PFN we isolated pages from. When next
-		 * looking for free pages, the search will restart here as
-		 * page migration may have returned some pages to the allocator
-		 */
-		if (isolated)
-			high_pfn = max(high_pfn, pfn);
-	}
-
-	/* split_free_page does not map the pages */
-	map_pages(freelist);
-
-	cc->free_pfn = high_pfn;
-	cc->nr_freepages = nr_freepages;
-}
-
 /* Update the number of anon and file isolated pages in the zone */
 static void acct_isolated(struct zone *zone, struct compact_control *cc)
 {
@@ -325,13 +195,6 @@ static bool too_many_isolated(struct zone *zone)
 	return isolated > (inactive + active) / 2;
 }
 
-/* possible outcome of isolate_migratepages */
-typedef enum {
-	ISOLATE_ABORT,		/* Abort compaction now */
-	ISOLATE_NONE,		/* No pages isolated, continue scanning */
-	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
-} isolate_migrate_t;
-
 /**
  * isolate_migratepages_range() - isolate all migrate-able pages in range.
  * @zone:	Zone pages are in.
@@ -351,7 +214,7 @@ typedef enum {
  * does not modify any cc's fields, in particular it does not modify
  * (or read for that matter) cc->migrate_pfn.
  */
-static unsigned long
+unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn)
 {
@@ -487,35 +350,118 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	return low_pfn;
 }
 
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE, allow migration */
+	if (migratetype == MIGRATE_MOVABLE)
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
 /*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-					struct compact_control *cc)
+static void isolate_freepages(struct zone *zone,
+				struct compact_control *cc)
 {
-	unsigned long low_pfn, end_pfn;
+	struct page *page;
+	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long flags;
+	int nr_freepages = cc->nr_freepages;
+	struct list_head *freelist = &cc->freepages;
 
-	/* Do not scan outside zone boundaries */
-	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
+	pfn = cc->free_pfn;
+	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
 
-	/* Only scan within a pageblock boundary */
-	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);
 
-	/* Do not cross the free scanner or scan within a memory hole */
-	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
-		cc->migrate_pfn = end_pfn;
-		return ISOLATE_NONE;
-	}
+	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
-	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
-	if (!low_pfn)
-		return ISOLATE_ABORT;
+	/*
+	 * Isolate free pages until enough are available to migrate the
+	 * pages on cc->migratepages. We stop searching if the migrate
+	 * and free page scanners meet or enough free pages are isolated.
+	 */
+	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+					pfn -= pageblock_nr_pages) {
+		unsigned long isolated;
 
-	cc->migrate_pfn = low_pfn;
+		if (!pfn_valid(pfn))
+			continue;
 
-	return ISOLATE_SUCCESS;
+		/*
+		 * Check for overlapping nodes/zones. It's possible on some
+		 * configurations to have a setup like
+		 * node0 node1 node0
+		 * i.e. it's possible that all pages within a zones range of
+		 * pages do not belong to a single zone.
+		 */
+		page = pfn_to_page(pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Check the block is suitable for migration */
+		if (!suitable_migration_target(page))
+			continue;
+
+		/*
+		 * Found a block suitable for isolating free pages from. Now
+		 * we disabled interrupts, double check things are ok and
+		 * isolate the pages. This is to minimise the time IRQs
+		 * are disabled
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+			isolated = isolate_freepages_block(pfn, end_pfn,
+							   freelist, false);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+
+		/*
+		 * Record the highest PFN we isolated pages from. When next
+		 * looking for free pages, the search will restart here as
+		 * page migration may have returned some pages to the allocator
+		 */
+		if (isolated)
+			high_pfn = max(high_pfn, pfn);
+	}
+
+	/* split_free_page does not map the pages */
+	map_pages(freelist);
+
+	cc->free_pfn = high_pfn;
+	cc->nr_freepages = nr_freepages;
 }
 
 /*
@@ -564,6 +510,44 @@ static void update_nr_listpages(struct compact_control *cc)
 	cc->nr_freepages = nr_freepages;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+					struct compact_control *cc)
+{
+	unsigned long low_pfn, end_pfn;
+
+	/* Do not scan outside zone boundaries */
+	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+	/* Only scan within a pageblock boundary */
+	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+	/* Do not cross the free scanner or scan within a memory hole */
+	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+		cc->migrate_pfn = end_pfn;
+		return ISOLATE_NONE;
+	}
+
+	/* Perform the isolation */
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	if (!low_pfn)
+		return ISOLATE_ABORT;
+
+	cc->migrate_pfn = low_pfn;
+
+	return ISOLATE_SUCCESS;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -910,3 +894,5 @@ void compaction_unregister_node(struct node *node)
 	return device_remove_file(&node->dev, &dev_attr_compact);
 }
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
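Schematically, mm/compaction.c is laid out as follows after this patch (a
sketch for orientation, not part of the diff):

	#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/*
	 * Code shared with CMA: release_freepages(), map_pages(),
	 * isolate_freepages_block(), isolate_freepages_range(),
	 * acct_isolated(), too_many_isolated() and
	 * isolate_migratepages_range().
	 */
	#endif /* CONFIG_COMPACTION || CONFIG_CMA */

	#ifdef CONFIG_COMPACTION
	/*
	 * Compaction-only code: suitable_migration_target(),
	 * isolate_freepages(), isolate_migratepages(), compact_zone()
	 * and the sysfs interface.
	 */
	#endif /* CONFIG_COMPACTION */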
diff --git a/mm/internal.h b/mm/internal.h
index 2189af491783..aee4761cf9a9 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -100,6 +100,39 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 extern bool is_free_buddy_page(struct page *page);
 #endif
 
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+	struct list_head freepages;	/* List of free pages to migrate to */
+	struct list_head migratepages;	/* List of pages being migrated */
+	unsigned long nr_freepages;	/* Number of isolated free pages */
+	unsigned long nr_migratepages;	/* Number of pages to migrate */
+	unsigned long free_pfn;		/* isolate_freepages search base */
+	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
+
+	int order;			/* order a direct compactor needs */
+	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
+	struct zone *zone;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+			   unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
 
 /*
  * function for dealing with page's order in buddy system.
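These declarations are what a CMA-style user builds on. A minimal sketch
of such a caller — hypothetical, loosely modelled on the
contiguous-allocation code this series adds later, and assuming only the
API declared above:

	#include "internal.h"

	/* Hypothetical: isolate movable pages in [start_pfn, end_pfn). */
	static unsigned long
	example_isolate_for_migration(unsigned long start_pfn,
				      unsigned long end_pfn)
	{
		struct compact_control cc = {
			.nr_migratepages = 0,
			.order = -1,		/* not a direct compactor */
			.zone = page_zone(pfn_to_page(start_pfn)),
			.sync = true,		/* synchronous migration */
		};

		INIT_LIST_HEAD(&cc.migratepages);

		/* Returns the next PFN to scan, or 0 if isolation failed. */
		return isolate_migratepages_range(cc.zone, &cc,
						  start_pfn, end_pfn);
	}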