author	Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 18:47:00 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:43 -0500
commit	5f24ce5fd34c3ca1b3d10d30da754732da64d5c0 (patch)
tree	c82d27461f2adda210e77808b7dd04eaec017f2f /mm
parent	21ae5b01750f14140809508a478a4413792e0261 (diff)
thp: remove PG_buddy
PG_buddy can be converted to _mapcount == -2.  So the PG_compound_lock can be
added to page->flags without overflowing (because of the sparse section bits
increasing) with CONFIG_X86_PAE=y and CONFIG_X86_PAT=y.  This also has to move
the memory hotplug code from _mapcount to lru.next to avoid any risk of
clashes.  We can't use lru.next for PG_buddy removal, but memory hotplug can
use lru.next even more easily than the mapcount instead.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
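The buddy-marker half of the change (the PageBuddy() helpers) lives in include/linux/mm.h, so it is not visible in the mm/-only diffstat below. A rough, self-contained sketch of the scheme that the page_alloc.c comments now describe follows; the simplified struct page, the assert() calls, and the main() driver are illustrative placeholders rather than kernel code. A free page is tagged by setting _mapcount to the sentinel value -2 and untagged by restoring it to -1:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for struct page: only the field this patch cares
 * about.  In the kernel, _mapcount is an atomic_t that holds -1 while the
 * page has no userspace mappings. */
struct page {
	int _mapcount;
};

/* A page is in the buddy allocator iff _mapcount holds the -2 sentinel
 * that replaces the old PG_buddy bit. */
static int PageBuddy(struct page *page)
{
	return page->_mapcount == -2;
}

/* In the kernel these run under zone->lock; the asserts model the
 * invariant that only an unmapped page (-1) may enter the buddy system. */
static void __SetPageBuddy(struct page *page)
{
	assert(page->_mapcount == -1);
	page->_mapcount = -2;
}

static void __ClearPageBuddy(struct page *page)
{
	assert(PageBuddy(page));
	page->_mapcount = -1;
}

int main(void)
{
	struct page p = { ._mapcount = -1 };

	__SetPageBuddy(&p);		/* page freed into the buddy lists */
	printf("in buddy: %d\n", PageBuddy(&p));
	__ClearPageBuddy(&p);		/* page allocated back out */
	printf("in buddy: %d\n", PageBuddy(&p));
	return 0;
}

Retiring the flag bit this way is what frees room in page->flags for PG_compound_lock on configurations with many sparse section bits, as the commit message above explains.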
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	14
-rw-r--r--	mm/page_alloc.c	7
-rw-r--r--	mm/sparse.c	4
3 files changed, 13 insertions, 12 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a2832c092509..e92f04749fcb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -82,9 +82,10 @@ static void release_memory_resource(struct resource *res)
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info, struct page *page, int type)
+static void get_page_bootmem(unsigned long info, struct page *page,
+			     unsigned long type)
 {
-	atomic_set(&page->_mapcount, type);
+	page->lru.next = (struct list_head *) type;
 	SetPagePrivate(page);
 	set_page_private(page, info);
 	atomic_inc(&page->_count);
@@ -94,15 +95,16 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type)
  * so use __ref to tell modpost not to generate a warning */
 void __ref put_page_bootmem(struct page *page)
 {
-	int type;
+	unsigned long type;
 
-	type = atomic_read(&page->_mapcount);
-	BUG_ON(type >= -1);
+	type = (unsigned long) page->lru.next;
+	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
+	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 
 	if (atomic_dec_return(&page->_count) == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		reset_page_mapcount(page);
+		INIT_LIST_HEAD(&page->lru);
 		__free_pages_bootmem(page, 0);
 	}
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e7664b9f706c..9dfe49bceff4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -449,8 +449,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we use PG_buddy.
- * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount -2.
+ * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -483,7 +483,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_buddy. Page's
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
@@ -5574,7 +5574,6 @@ static struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_swapcache,		"swapcache"	},
 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
 	{1UL << PG_reclaim,		"reclaim"	},
-	{1UL << PG_buddy,		"buddy"		},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
 #ifdef CONFIG_MMU
diff --git a/mm/sparse.c b/mm/sparse.c
index 95ac219af379..93250207c5cf 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -671,10 +671,10 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	int magic;
+	unsigned long magic;
 
 	for (i = 0; i < nr_pages; i++, page++) {
-		magic = atomic_read(&page->_mapcount);
+		magic = (unsigned long) page->lru.next;
 
 		BUG_ON(magic == NODE_INFO);
 
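For completeness, here is how the memory-hotplug side now round-trips its bootmem "type" through page->lru.next instead of _mapcount, matching the memory_hotplug.c and sparse.c hunks above. This is a simplified userspace sketch, not the kernel code: the struct definitions, the enum values, and the plain-int reference count are stand-ins (the real MEMORY_HOTPLUG_MIN/MAX_BOOTMEM_TYPE constants live in include/linux/memory_hotplug.h).

#include <assert.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures used by the hunks above. */
struct list_head {
	struct list_head *next, *prev;
};

struct page {
	struct list_head lru;		/* lru.next doubles as the type slot */
	unsigned long private;		/* set_page_private()/page_private() */
	int count;			/* stand-in for the atomic _count */
};

/* Illustrative bootmem type markers; the min/max bounds let the check in
 * put_page_bootmem() catch a page whose lru.next was never set up. */
enum {
	BOOTMEM_TYPE_MIN = 12,		/* arbitrary nonzero base for the sketch */
	SECTION_INFO = BOOTMEM_TYPE_MIN,
	MIX_SECTION_INFO,
	NODE_INFO,
	BOOTMEM_TYPE_MAX = NODE_INFO,
};

/* Stash the type in the pointer-sized lru.next slot, as the patched
 * get_page_bootmem() does, instead of abusing _mapcount. */
static void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	page->private = info;
	page->count++;
}

static void put_page_bootmem(struct page *page)
{
	unsigned long type = (unsigned long) page->lru.next;

	assert(type >= BOOTMEM_TYPE_MIN && type <= BOOTMEM_TYPE_MAX);

	if (--page->count == 0) {
		page->private = 0;
		/* equivalent of INIT_LIST_HEAD(&page->lru) */
		page->lru.next = page->lru.prev = &page->lru;
		printf("would call __free_pages_bootmem(page, 0)\n");
	}
}

int main(void)
{
	struct page p = { .lru = { 0, 0 }, .private = 0, .count = 0 };

	get_page_bootmem(42, &p, SECTION_INFO);	/* 42: illustrative info value */
	put_page_bootmem(&p);
	return 0;
}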