diff options
author | Mel Gorman <mel@csn.ul.ie> | 2009-06-16 18:32:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 22:47:33 -0400 |
commit | 728ec980fb9fa2d65d9e05444079a53615985e7b (patch) | |
tree | d98dca98cd46fc28a871135cc9bd95168e4667b3 /mm/page_alloc.c | |
parent | a56f57ff94c25d5d80def06f3ed8fe7f99147762 (diff) |
page allocator: inline __rmqueue_smallest()
Inline __rmqueue_smallest by altering flow very slightly so that there is
only one call site. Because there is only one call site, this function
can then be inlined without causing text bloat. On an x86-based config,
this patch reduces text by 16 bytes.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 20 |
1 files changed, 16 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 94f33e2b7f0b..04713f649fd4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -661,7 +661,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) | |||
661 | * Go through the free lists for the given migratetype and remove | 661 | * Go through the free lists for the given migratetype and remove |
662 | * the smallest available page from the freelists | 662 | * the smallest available page from the freelists |
663 | */ | 663 | */ |
664 | static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, | 664 | static inline |
665 | struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, | ||
665 | int migratetype) | 666 | int migratetype) |
666 | { | 667 | { |
667 | unsigned int current_order; | 668 | unsigned int current_order; |
@@ -831,8 +832,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order, | |||
831 | } | 832 | } |
832 | } | 833 | } |
833 | 834 | ||
834 | /* Use MIGRATE_RESERVE rather than fail an allocation */ | 835 | return NULL; |
835 | return __rmqueue_smallest(zone, order, MIGRATE_RESERVE); | ||
836 | } | 836 | } |
837 | 837 | ||
838 | /* | 838 | /* |
@@ -844,11 +844,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, | |||
844 | { | 844 | { |
845 | struct page *page; | 845 | struct page *page; |
846 | 846 | ||
847 | retry_reserve: | ||
847 | page = __rmqueue_smallest(zone, order, migratetype); | 848 | page = __rmqueue_smallest(zone, order, migratetype); |
848 | 849 | ||
849 | if (unlikely(!page)) | 850 | if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { |
850 | page = __rmqueue_fallback(zone, order, migratetype); | 851 | page = __rmqueue_fallback(zone, order, migratetype); |
851 | 852 | ||
853 | /* | ||
854 | * Use MIGRATE_RESERVE rather than fail an allocation. goto | ||
855 | * is used because __rmqueue_smallest is an inline function | ||
856 | * and we want just one call site | ||
857 | */ | ||
858 | if (!page) { | ||
859 | migratetype = MIGRATE_RESERVE; | ||
860 | goto retry_reserve; | ||
861 | } | ||
862 | } | ||
863 | |||
852 | return page; | 864 | return page; |
853 | } | 865 | } |
854 | 866 | ||