author     Michal Nazarewicz <mina86@mina86.com>	2011-12-29 07:09:50 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 09:09:31 -0400
commit     041d3a8cdc18dc375a128d90bbb753949a81b1fb (patch)
tree       26f5e1b30f638ea148e7deb13c44d0d0f1d39935 /mm
parent     ff9543fd32060917beb080b1eb2d1d41ec7f39e0 (diff)
mm: page_alloc: introduce alloc_contig_range()
This commit adds the alloc_contig_range() function, which tries to allocate a given range of pages. It tries to migrate all already-allocated pages that fall in the range, thus freeing them. Once all pages in the range are freed, they are removed from the buddy system and thus allocated for the caller to use.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
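For readers of this log entry, here is a hedged sketch of how a caller would use the new pair of functions. It is illustration only, not part of the commit: the helper names and the surrounding driver context are hypothetical, and the caller must meet the alignment and single-zone requirements spelled out in the kernel-doc below.

/* Hypothetical caller (not part of this commit): grab nr_pages
 * physically contiguous pages from a reserved PFN window using the
 * API introduced below, and release them again.  base_pfn is assumed
 * to be suitably aligned and to lie within a single zone.
 */
static struct page *contig_alloc_sketch(unsigned long base_pfn,
					unsigned long nr_pages)
{
	int ret = alloc_contig_range(base_pfn, base_pfn + nr_pages);

	if (ret)		/* -EINTR, -EBUSY, ... */
		return NULL;
	return pfn_to_page(base_pfn);
}

static void contig_free_sketch(struct page *page, unsigned long nr_pages)
{
	/* Pages from alloc_contig_range() must be returned with
	 * free_contig_range(), not __free_pages().
	 */
	free_contig_range(page_to_pfn(page), nr_pages);
}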
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	188
1 file changed, 188 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6fb46c1589b9..2c38a30d064e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 
 #include <asm/tlbflush.h>
@@ -5550,6 +5551,193 @@ out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+			     pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+				pageblock_nr_pages));
+}
+
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+			     int **resultp)
+{
+	return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+	/* This function is based on compact_zone() from compaction.c. */
+
+	unsigned long pfn = start;
+	unsigned int tries = 0;
+	int ret = 0;
+
+	struct compact_control cc = {
+		.nr_migratepages = 0,
+		.order = -1,
+		.zone = page_zone(pfn_to_page(start)),
+		.sync = true,
+	};
+	INIT_LIST_HEAD(&cc.migratepages);
+
+	migrate_prep_local();
+
+	while (pfn < end || !list_empty(&cc.migratepages)) {
+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (list_empty(&cc.migratepages)) {
+			cc.nr_migratepages = 0;
+			pfn = isolate_migratepages_range(cc.zone, &cc,
+							 pfn, end);
+			if (!pfn) {
+				ret = -EINTR;
+				break;
+			}
+			tries = 0;
+		} else if (++tries == 5) {
+			ret = ret < 0 ? ret : -EBUSY;
+			break;
+		}
+
+		ret = migrate_pages(&cc.migratepages,
+				    __alloc_contig_migrate_alloc,
+				    0, false, true);
+	}
+
+	putback_lru_pages(&cc.migratepages);
+	return ret > 0 ? 0 : ret;
+}
+
+/**
+ * alloc_contig_range() -- tries to allocate a given range of pages
+ * @start:	start PFN to allocate
+ * @end:	one-past-the-last PFN to allocate
+ *
+ * The PFN range does not have to be pageblock- or
+ * MAX_ORDER_NR_PAGES-aligned; however, it is the caller's
+ * responsibility to guarantee that we are the only thread that
+ * changes the migrate type of the pageblocks the pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or a negative error code.  On success, all
+ * pages whose PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end)
+{
+	struct zone *zone = page_zone(pfn_to_page(start));
+	unsigned long outer_start, outer_end;
+	int ret = 0, order;
+
+	/*
+	 * What we do here is mark all pageblocks in the range as
+	 * MIGRATE_ISOLATE.  Because pageblocks and max-order pages
+	 * may have different sizes, and because of the way the page
+	 * allocator works, we align the range to the bigger of the
+	 * two so that the page allocator won't try to merge buddies
+	 * from different pageblocks and change MIGRATE_ISOLATE to
+	 * some other migration type.
+	 *
+	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+	 * migrate the pages from the unaligned range (i.e. the pages
+	 * we are actually interested in).  This puts all the pages in
+	 * the range back into the page allocator as MIGRATE_ISOLATE.
+	 *
+	 * When this is done, we take the pages in the range from the
+	 * page allocator, removing them from the buddy system, so the
+	 * page allocator will never consider using them.
+	 *
+	 * This lets us mark the pageblocks back as
+	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+	 * aligned range, but outside the unaligned original range,
+	 * are returned to the page allocator for the buddy system to
+	 * use.
+	 */
+
+	ret = start_isolate_page_range(pfn_max_align_down(start),
+				       pfn_max_align_up(end));
+	if (ret)
+		goto done;
+
+	ret = __alloc_contig_migrate_range(start, end);
+	if (ret)
+		goto done;
+
+	/*
+	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES-
+	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
+	 * more, all pages in [start, end) are free in the page
+	 * allocator.  What we are going to do is allocate all pages
+	 * from [start, end), that is, remove them from the page
+	 * allocator.
+	 *
+	 * The only problem is that pages at the beginning and at the
+	 * end of the interesting range may not be aligned with pages
+	 * the page allocator holds, i.e. they can be part of
+	 * higher-order pages.  Because of this, we reserve the bigger
+	 * range and, once this is done, free the pages we are not
+	 * interested in.
+	 *
+	 * We don't have to hold zone->lock here because the pages are
+	 * isolated and thus won't be removed from the buddy system.
+	 */
+
+	lru_add_drain_all();
+	drain_all_pages();
+
+	order = 0;
+	outer_start = start;
+	while (!PageBuddy(pfn_to_page(outer_start))) {
+		if (++order >= MAX_ORDER) {
+			ret = -EBUSY;
+			goto done;
+		}
+		outer_start &= ~0UL << order;
+	}
+
+	/* Make sure the range is really isolated. */
+	if (test_pages_isolated(outer_start, end)) {
+		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+			outer_start, end);
+		ret = -EBUSY;
+		goto done;
+	}
+
+	outer_end = isolate_freepages_range(outer_start, end);
+	if (!outer_end) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Free head and tail (if any) */
+	if (start != outer_start)
+		free_contig_range(outer_start, start - outer_start);
+	if (end != outer_end)
+		free_contig_range(end, outer_end - end);
+
+done:
+	undo_isolate_page_range(pfn_max_align_down(start),
+				pfn_max_align_up(end));
+	return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+	for (; nr_pages--; ++pfn)
+		__free_page(pfn_to_page(pfn));
+}
+#endif
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
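Editorial aside on the alignment helpers added above: pfn_max_align_down() and pfn_max_align_up() round a PFN down/up to the larger of MAX_ORDER_NR_PAGES and pageblock_nr_pages using standard power-of-two mask arithmetic. The standalone sketch below demonstrates the effect with assumed constants (1024 and 512 pages, plausible for x86 with MAX_ORDER = 11 and 2 MiB pageblocks); the program and its constants are illustrative, not part of the commit.

#include <stdio.h>

/* Assumed illustrative values, not taken from any particular config:
 * MAX_ORDER_NR_PAGES = 1024 and pageblock_nr_pages = 512.
 */
#define MAX_ORDER_NR_PAGES	1024UL
#define PAGEBLOCK_NR_PAGES	512UL
#define MAX_ALIGN	(MAX_ORDER_NR_PAGES > PAGEBLOCK_NR_PAGES ? \
			 MAX_ORDER_NR_PAGES : PAGEBLOCK_NR_PAGES)

/* Mirror of pfn_max_align_down(): clear the low bits of the PFN. */
static unsigned long max_align_down(unsigned long pfn)
{
	return pfn & ~(MAX_ALIGN - 1);
}

/* Mirror of pfn_max_align_up(), i.e. ALIGN() spelled out. */
static unsigned long max_align_up(unsigned long pfn)
{
	return (pfn + MAX_ALIGN - 1) & ~(MAX_ALIGN - 1);
}

int main(void)
{
	/* An unaligned request [1000, 1500) is isolated over the wider
	 * aligned window [0, 2048) and trimmed afterwards by the
	 * free_contig_range() calls for head and tail.
	 */
	unsigned long start = 1000, end = 1500;

	printf("request [%lu, %lu) -> isolate [%lu, %lu)\n",
	       start, end, max_align_down(start), max_align_up(end));
	return 0;
}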