author	Andy Whitcroft <apw@shadowen.org>	2007-05-06 17:49:14 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:52 -0400
commit	14e072984179d3d421bf9ab75cc67e0961742841 (patch)
tree	65a5a6f7d9756b8e7010278b58908d04da257a28
parent	ac267728f13c55017ed5ee243c9c3166e27ab929 (diff)
add pfn_valid_within helper for sub-MAX_ORDER hole detection
Generally we work under the assumption that the memory in the mem_map array is contiguous and valid out to a MAX_ORDER_NR_PAGES block of pages, i.e. that if we have validated any page within this MAX_ORDER_NR_PAGES block we need not check any other.  This is not true when CONFIG_HOLES_IN_ZONE is set, and we must then check each and every reference we make from a pfn.

Add a pfn_valid_within() helper which should be used when scanning pages within a MAX_ORDER_NR_PAGES block when we have already checked the validity of the block normally with pfn_valid().  This can then be optimised away when we do not have holes within a MAX_ORDER_NR_PAGES block of pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
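[Editorial note, not part of this patch: a minimal sketch of the intended usage pattern, assuming the usual kernel definitions of pfn_valid() and MAX_ORDER_NR_PAGES; the helper name count_valid_pages_in_block() is invented for illustration.  The caller validates the block once with pfn_valid(), then guards each individual pfn with pfn_valid_within(), which compiles down to a constant when CONFIG_HOLES_IN_ZONE is not set.]

/* Hypothetical illustration only: scan one MAX_ORDER_NR_PAGES block. */
static unsigned long count_valid_pages_in_block(unsigned long block_start_pfn)
{
	unsigned long pfn, count = 0;

	/* Validate the MAX_ORDER_NR_PAGES block once, as before. */
	if (!pfn_valid(block_start_pfn))
		return 0;

	for (pfn = block_start_pfn;
	     pfn < block_start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
		/* Per-pfn check; optimised to (1) without CONFIG_HOLES_IN_ZONE. */
		if (!pfn_valid_within(pfn))
			continue;
		count++;
	}

	return count;
}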
-rw-r--r--	include/linux/mmzone.h	12
-rw-r--r--	mm/page_alloc.c	8
2 files changed, 14 insertions, 6 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ee9e3143df4f..2f1544e83042 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -784,6 +784,18 @@ void sparse_init(void);
 void memory_present(int nid, unsigned long start, unsigned long end);
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 
+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
+ * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 019ceda6a8b6..f564717d22f3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -156,10 +156,8 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 static int page_is_consistent(struct zone *zone, struct page *page)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-	if (!pfn_valid(page_to_pfn(page)))
+	if (!pfn_valid_within(page_to_pfn(page)))
 		return 0;
-#endif
 	if (zone != page_zone(page))
 		return 0;
 
@@ -346,10 +344,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
 static inline int page_is_buddy(struct page *page, struct page *buddy,
 								int order)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-	if (!pfn_valid(page_to_pfn(buddy)))
+	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
-#endif
 
 	if (page_zone_id(page) != page_zone_id(buddy))
 		return 0;