author    Adrian Bunk <bunk@kernel.org>                        2008-07-24 00:28:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>      2008-07-24 13:47:20 -0400
commit    b69a7288ea7bf171328f313f0edae629f50e3bdb (patch)
tree      20cb483cced19bda14dc4f12bbba6e7c1d01c4a3 /mm/page_alloc.c
parent    2be0ffe2b29bd31d3debd0877797892ff2d91f4c (diff)
mm/page_alloc.c: cleanups
This patch contains the following cleanups (a minimal sketch of the pattern follows the list):
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
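
The pattern is the same throughout: a symbol referenced only from within
mm/page_alloc.c loses its external linkage. Below is a minimal,
self-contained C sketch of the before/after; the names are hypothetical
and do not appear in the patch itself.

    /* static_cleanup_sketch.c -- illustration only; hypothetical names,
     * not taken from mm/page_alloc.c. */
    #include <stdio.h>

    /* Before: external linkage. Any other translation unit could declare
     * and link against these, so the compiler and linker must assume
     * they are used somewhere. */
    unsigned long needlessly_global_count;
    int needlessly_global_helper(int x) { return x + 1; }

    /* After: internal linkage. The symbols stay out of the kernel-wide
     * namespace, and the compiler can warn about (or discard) them if
     * they become unused. */
    static unsigned long file_local_count;
    static int file_local_helper(int x) { return x + 1; }

    int main(void)
    {
            needlessly_global_count = needlessly_global_helper(41);
            file_local_count = file_local_helper(41);
            printf("%lu %lu\n", needlessly_global_count, file_local_count);
            return 0;
    }

Beyond namespace hygiene, internal linkage gives the optimizer more
freedom: a static function with a single caller is an easy inlining
candidate, which is presumably also why the explicit "inline" on
setup_pageset() could be dropped in the same change (see the diff below).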
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8d528d57b403..cd4c41432ef6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
-unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -674,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
 	struct page *page;
 	unsigned long order;
@@ -715,7 +715,8 @@ int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -2652,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
 	return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
@@ -3099,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
 	int zone_index;
 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3125,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
@@ -3186,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -3723,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(int nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3795,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
 	int i, nid;
 	unsigned long usable_startpfn;