diff options
author | Paul Jackson <pj@sgi.com> | 2006-12-06 23:31:38 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:39:20 -0500 |
commit | 0798e5193cd70f6c867ec176d7730589f944c627 (patch) | |
tree | abe3ada0b04080729418a0c301d8b55b4363b56e | |
parent | a2ce774096110ccc5c02cbdc05897d005fcd3db8 (diff) |
[PATCH] memory page alloc minor cleanups
- s/freeliest/freelist/ spelling fix
- Check for NULL *z zone seems useless - even if it could happen, so
what? Perhaps we should have a check later on if we are faced with an
allocation request that is not allowed to fail - shouldn't that be a
serious kernel error, passing an empty zonelist with a mandate to not
fail?
- Initializing 'z' to zonelist->zones can wait until after the first
get_page_from_freelist() fails; we only use 'z' in the wakeup_kswapd()
loop, so let's initialize 'z' there, in a 'for' loop. Seems clearer.
- Remove superfluous braces around a break
- Fix a couple of errant spaces
- Adjust indentation on the cpuset_zone_allowed() check, to match the
lines just before it -- seems easier to read in this case.
- Add another set of braces to the zone_watermark_ok logic
From: Paul Jackson <pj@sgi.com>
Backout one item from a previous "memory page_alloc minor cleanups" patch.
Until and unless we are certain that no one can ever pass an empty zonelist
to __alloc_pages(), this check for an empty zonelist (or some BUG
equivalent) is essential. The code in get_page_from_freelist() blows up if
passed an empty zonelist.
Signed-off-by: Paul Jackson <pj@sgi.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | mm/page_alloc.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index aa6fcc7ca66f..08360aa111f9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -486,7 +486,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order) | |||
486 | spin_lock(&zone->lock); | 486 | spin_lock(&zone->lock); |
487 | zone->all_unreclaimable = 0; | 487 | zone->all_unreclaimable = 0; |
488 | zone->pages_scanned = 0; | 488 | zone->pages_scanned = 0; |
489 | __free_one_page(page, zone ,order); | 489 | __free_one_page(page, zone, order); |
490 | spin_unlock(&zone->lock); | 490 | spin_unlock(&zone->lock); |
491 | } | 491 | } |
492 | 492 | ||
@@ -926,7 +926,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
926 | } | 926 | } |
927 | 927 | ||
928 | /* | 928 | /* |
929 | * get_page_from_freeliest goes through the zonelist trying to allocate | 929 | * get_page_from_freelist goes through the zonelist trying to allocate |
930 | * a page. | 930 | * a page. |
931 | */ | 931 | */ |
932 | static struct page * | 932 | static struct page * |
@@ -948,8 +948,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, | |||
948 | zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) | 948 | zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) |
949 | break; | 949 | break; |
950 | if ((alloc_flags & ALLOC_CPUSET) && | 950 | if ((alloc_flags & ALLOC_CPUSET) && |
951 | !cpuset_zone_allowed(zone, gfp_mask)) | 951 | !cpuset_zone_allowed(zone, gfp_mask)) |
952 | continue; | 952 | continue; |
953 | 953 | ||
954 | if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { | 954 | if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { |
955 | unsigned long mark; | 955 | unsigned long mark; |
@@ -959,17 +959,18 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, | |||
959 | mark = zone->pages_low; | 959 | mark = zone->pages_low; |
960 | else | 960 | else |
961 | mark = zone->pages_high; | 961 | mark = zone->pages_high; |
962 | if (!zone_watermark_ok(zone , order, mark, | 962 | if (!zone_watermark_ok(zone, order, mark, |
963 | classzone_idx, alloc_flags)) | 963 | classzone_idx, alloc_flags)) { |
964 | if (!zone_reclaim_mode || | 964 | if (!zone_reclaim_mode || |
965 | !zone_reclaim(zone, gfp_mask, order)) | 965 | !zone_reclaim(zone, gfp_mask, order)) |
966 | continue; | 966 | continue; |
967 | } | ||
967 | } | 968 | } |
968 | 969 | ||
969 | page = buffered_rmqueue(zonelist, zone, order, gfp_mask); | 970 | page = buffered_rmqueue(zonelist, zone, order, gfp_mask); |
970 | if (page) { | 971 | if (page) |
971 | break; | 972 | break; |
972 | } | 973 | |
973 | } while (*(++z) != NULL); | 974 | } while (*(++z) != NULL); |
974 | return page; | 975 | return page; |
975 | } | 976 | } |
@@ -1005,9 +1006,8 @@ restart: | |||
1005 | if (page) | 1006 | if (page) |
1006 | goto got_pg; | 1007 | goto got_pg; |
1007 | 1008 | ||
1008 | do { | 1009 | for (z = zonelist->zones; *z; z++) |
1009 | wakeup_kswapd(*z, order); | 1010 | wakeup_kswapd(*z, order); |
1010 | } while (*(++z)); | ||
1011 | 1011 | ||
1012 | /* | 1012 | /* |
1013 | * OK, we're below the kswapd watermark and have kicked background | 1013 | * OK, we're below the kswapd watermark and have kicked background |