author		Badari Pulavarty <pbadari@us.ibm.com>	2008-07-24 00:28:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-24 13:47:21 -0400
commit		5c755e9fd813810680abd56ec09a5f90143e815b (patch)
tree		1686c0666f6bd630441957a53c546d90b0f61723 /mm
parent		2f7f24eca31c4fc2fdb134b2ef743ccd67cfb9a9 (diff)
memory-hotplug: add sysfs removable attribute for hotplug memory remove
Memory may be hot-removed on a per-memory-block basis, particularly on
POWER where the SPARSEMEM section size often matches the memory-block
size. A user-level agent must be able to identify which sections of
memory are likely to be removable before attempting the potentially
expensive operation. This patch adds a file called "removable" to the
memory directory in sysfs to help such an agent. In this patch, a memory
block is considered removable if every pageblock it contains satisfies at
least one of the following:
o the pageblock contains only MOVABLE pages
o the pageblock contains only free pages, regardless of pageblock type
On the other hand, a memory block containing a pageblock that starts with a
PageReserved() page will never be considered removable. Without this patch,
the user-level agent is forced to choose a memory block to remove at random.
Sample output of the sysfs files:
./memory/memory0/removable: 0
./memory/memory1/removable: 0
./memory/memory2/removable: 0
./memory/memory3/removable: 0
./memory/memory4/removable: 0
./memory/memory5/removable: 0
./memory/memory6/removable: 0
./memory/memory7/removable: 1
./memory/memory8/removable: 0
./memory/memory9/removable: 0
./memory/memory10/removable: 0
./memory/memory11/removable: 0
./memory/memory12/removable: 0
./memory/memory13/removable: 0
./memory/memory14/removable: 0
./memory/memory15/removable: 0
./memory/memory16/removable: 0
./memory/memory17/removable: 1
./memory/memory18/removable: 1
./memory/memory19/removable: 1
./memory/memory20/removable: 1
./memory/memory21/removable: 1
./memory/memory22/removable: 1
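For illustration only (this program is not part of the patch), a user-level
agent could scan these attributes before deciding which block to try to
offline. The sketch below assumes the conventional sysfs layout
/sys/devices/system/memory/memoryN/removable; error handling is minimal.

/*
 * Illustrative user-space sketch, not part of this patch: list the memory
 * blocks whose "removable" attribute reads as 1.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/devices/system/memory";
	char path[256];
	struct dirent *de;
	DIR *dir = opendir(base);

	if (!dir) {
		perror("opendir");
		return 1;
	}

	while ((de = readdir(dir)) != NULL) {
		FILE *f;
		int removable;

		/* Only the per-block memoryN directories are of interest */
		if (strncmp(de->d_name, "memory", 6) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s/removable",
			 base, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute absent on kernels without this patch */
		if (fscanf(f, "%d", &removable) == 1 && removable)
			printf("%s is likely removable\n", de->d_name);
		fclose(f);
	}
	closedir(dir);
	return 0;
}

On kernels without this patch the attribute simply does not exist, which the
sketch treats the same as "not removable".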
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory_hotplug.c	60
1 file changed, 60 insertions, 0 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 93aba78dc8b..89fee2dcb03 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -523,6 +523,66 @@ EXPORT_SYMBOL_GPL(add_memory);
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
+ * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
+ * set and the size of the free page is given by page_order(). Using this,
+ * the function determines if the pageblock contains only free pages.
+ * Due to buddy contraints, a free page at least the size of a pageblock will
+ * be located at the start of the pageblock
+ */
+static inline int pageblock_free(struct page *page)
+{
+	return PageBuddy(page) && page_order(page) >= pageblock_order;
+}
+
+/* Return the start of the next active pageblock after a given page */
+static struct page *next_active_pageblock(struct page *page)
+{
+	int pageblocks_stride;
+
+	/* Ensure the starting page is pageblock-aligned */
+	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+
+	/* Move forward by at least 1 * pageblock_nr_pages */
+	pageblocks_stride = 1;
+
+	/* If the entire pageblock is free, move to the end of free page */
+	if (pageblock_free(page))
+		pageblocks_stride += page_order(page) - pageblock_order;
+
+	return page + (pageblocks_stride * pageblock_nr_pages);
+}
+
+/* Checks if this range of memory is likely to be hot-removable. */
+int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+{
+	int type;
+	struct page *page = pfn_to_page(start_pfn);
+	struct page *end_page = page + nr_pages;
+
+	/* Check the starting page of each pageblock within the range */
+	for (; page < end_page; page = next_active_pageblock(page)) {
+		type = get_pageblock_migratetype(page);
+
+		/*
+		 * A pageblock containing MOVABLE or free pages is considered
+		 * removable
+		 */
+		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
+			return 0;
+
+		/*
+		 * A pageblock starting with a PageReserved page is not
+		 * considered removable.
+		 */
+		if (PageReserved(page))
+			return 0;
+	}
+
+	/* All pageblocks in the memory block are likely to be hot-removable */
+	return 1;
+}
+
+/*
  * Confirm all pages in a range [start, end) is belongs to the same zone.
  */
 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
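
The sysfs plumbing that actually exposes is_mem_section_removable() as the
per-block "removable" attribute lives in drivers/base/memory.c and is outside
this diffstat (limited to 'mm'). The following is only a hedged sketch of how
such a show routine could be wired up in the sysdev-based memory driver of
that era; show_mem_removable and mem->phys_index are illustrative assumptions,
not taken from the diff above.

/*
 * Sketch only -- the real drivers/base/memory.c hunk is not shown in this
 * mm/-limited diffstat.  Assumes the 2.6.26-era memory_block/sys_device
 * layout; show_mem_removable and mem->phys_index are illustrative names.
 */
static ssize_t show_mem_removable(struct sys_device *dev, char *buf)
{
	unsigned long start_pfn;
	int ret;
	struct memory_block *mem =
		container_of(dev, struct memory_block, sysdev);

	/* One memory block covers one SPARSEMEM section in this sketch */
	start_pfn = section_nr_to_pfn(mem->phys_index);
	ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION);

	return sprintf(buf, "%d\n", ret);
}
static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);

Because each memory block corresponds to one SPARSEMEM section here,
PAGES_PER_SECTION is the natural range to pass to is_mem_section_removable().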