author	Thierry Reding <thierry.reding@avionic-design.de>	2012-12-12 16:51:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 20:38:32 -0500
commit	c8bf2d8ba4fbc093de7c0d192fe5d2531f14b8b9 (patch)
tree	0b26144dd758cc6cdceee1b1d2972bfab76b094c /mm
parent	3ea41e6210fea3b234b6cb3e9443e75975850bbf (diff)
mm: compaction: Fix compiler warning
compact_capture_page() is only used if compaction is enabled so it should
be moved into the corresponding #ifdef.
Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
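
For context, the warning being fixed: under -Wall, gcc emits -Wunused-function for a static function whose only caller is compiled out by a Kconfig #ifdef. A minimal standalone sketch of the pattern and the fix (hypothetical file and names, not kernel code):

	/* warn.c: hypothetical illustration, not part of this commit */

	static void helper(void)	/* defined unconditionally... */
	{
	}

	#ifdef CONFIG_COMPACTION
	void compact(void)		/* ...but only called when the option is set */
	{
		helper();
	}
	#endif

Compiling with "gcc -Wall -c warn.c" (CONFIG_COMPACTION undefined) warns "'helper' defined but not used"; with -DCONFIG_COMPACTION it does not. Moving helper() inside the #ifdef, as this patch does with compact_capture_page(), keeps the definition next to its only user so the warning cannot fire in either configuration.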
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	108
1 file changed, 54 insertions(+), 54 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d24dd2d7bad4..129791218226 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -215,60 +215,6 @@ static bool suitable_migration_target(struct page *page)
 	return false;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-	unsigned long flags;
-	int mtype, mtype_low, mtype_high;
-
-	if (!cc->page || *cc->page)
-		return;
-
-	/*
-	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-	 * regardless of the migratetype of the freelist is is captured from.
-	 * This is fine because the order for a high-order MIGRATE_MOVABLE
-	 * allocation is typically at least a pageblock size and overall
-	 * fragmentation is not impaired. Other allocation types must
-	 * capture pages from their own migratelist because otherwise they
-	 * could pollute other pageblocks like MIGRATE_MOVABLE with
-	 * difficult to move pages and making fragmentation worse overall.
-	 */
-	if (cc->migratetype == MIGRATE_MOVABLE) {
-		mtype_low = 0;
-		mtype_high = MIGRATE_PCPTYPES;
-	} else {
-		mtype_low = cc->migratetype;
-		mtype_high = cc->migratetype + 1;
-	}
-
-	/* Speculatively examine the free lists without zone lock */
-	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-		int order;
-		for (order = cc->order; order < MAX_ORDER; order++) {
-			struct page *page;
-			struct free_area *area;
-			area = &(cc->zone->free_area[order]);
-			if (list_empty(&area->free_list[mtype]))
-				continue;
-
-			/* Take the lock and attempt capture of the page */
-			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-				return;
-			if (!list_empty(&area->free_list[mtype])) {
-				page = list_entry(area->free_list[mtype].next,
-							struct page, lru);
-				if (capture_free_page(page, cc->order, mtype)) {
-					spin_unlock_irqrestore(&cc->zone->lock,
-								flags);
-					*cc->page = page;
-					return;
-				}
-			}
-			spin_unlock_irqrestore(&cc->zone->lock, flags);
-		}
-	}
-}
-
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -953,6 +899,60 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 	return COMPACT_CONTINUE;
 }
 
+static void compact_capture_page(struct compact_control *cc)
+{
+	unsigned long flags;
+	int mtype, mtype_low, mtype_high;
+
+	if (!cc->page || *cc->page)
+		return;
+
+	/*
+	 * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
+	 * regardless of the migratetype of the freelist is is captured from.
+	 * This is fine because the order for a high-order MIGRATE_MOVABLE
+	 * allocation is typically at least a pageblock size and overall
+	 * fragmentation is not impaired. Other allocation types must
+	 * capture pages from their own migratelist because otherwise they
+	 * could pollute other pageblocks like MIGRATE_MOVABLE with
+	 * difficult to move pages and making fragmentation worse overall.
+	 */
+	if (cc->migratetype == MIGRATE_MOVABLE) {
+		mtype_low = 0;
+		mtype_high = MIGRATE_PCPTYPES;
+	} else {
+		mtype_low = cc->migratetype;
+		mtype_high = cc->migratetype + 1;
+	}
+
+	/* Speculatively examine the free lists without zone lock */
+	for (mtype = mtype_low; mtype < mtype_high; mtype++) {
+		int order;
+		for (order = cc->order; order < MAX_ORDER; order++) {
+			struct page *page;
+			struct free_area *area;
+			area = &(cc->zone->free_area[order]);
+			if (list_empty(&area->free_list[mtype]))
+				continue;
+
+			/* Take the lock and attempt capture of the page */
+			if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
+				return;
+			if (!list_empty(&area->free_list[mtype])) {
+				page = list_entry(area->free_list[mtype].next,
+							struct page, lru);
+				if (capture_free_page(page, cc->order, mtype)) {
+					spin_unlock_irqrestore(&cc->zone->lock,
+								flags);
+					*cc->page = page;
+					return;
+				}
+			}
+			spin_unlock_irqrestore(&cc->zone->lock, flags);
+		}
+	}
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;