author     David Rientjes <rientjes@google.com>                    2007-10-17 02:25:54 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-17 11:42:45 -0400
commit     e815af95f94914993bbad279c71cf5fef9f4eaac (patch)
tree       492e0d3e8d3303f37cf9fb0beecf952a1c828c53
parent     70e24bdf6d2fead14631e72a07fba012400c521e (diff)
oom: change all_unreclaimable zone member to flags
Convert the int all_unreclaimable member of struct zone to an unsigned long
flags field. The new field can hold several different zone flags, such as
all_unreclaimable and reclaim_in_progress, so those dedicated members can be
removed and each converted to a per-zone flag.
Flags are set and cleared as follows:

        zone_set_flag(struct zone *zone, zone_flags_t flag)
        zone_clear_flag(struct zone *zone, zone_flags_t flag)
This defines the first two zone flags, ZONE_ALL_UNRECLAIMABLE and
ZONE_RECLAIM_LOCKED, which have the same semantics as the old
zone->all_unreclaimable and zone->reclaim_in_progress, respectively, and
converts all current users that set or clear either flag to the new interface.
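
For illustration, the caller conversions in the mm/page_alloc.c and mm/vmscan.c
hunks below follow this before/after pattern (an excerpt of existing call
sites, not an additional change):

        /* before: direct member access */
        zone->all_unreclaimable = 0;
        atomic_inc(&zone->reclaim_in_progress);

        /* after: the new flag interface */
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone_set_flag(zone, ZONE_RECLAIM_LOCKED);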
Helper functions are defined to test the flags:

        int zone_is_all_unreclaimable(const struct zone *zone)
        int zone_is_reclaim_locked(const struct zone *zone)
All flag operations use atomic bitops because some existing readers do not
take zone->lock.
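
For example, the zone_reclaim() check in the mm/vmscan.c hunk below reads both
flags without holding zone->lock:

        if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) ||
            zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC))
                return 0;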
[akpm@linux-foundation.org: add needed include]
Cc: Andrea Arcangeli <andrea@suse.de>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/mmzone.h | 29
-rw-r--r--  mm/page_alloc.c        |  8
-rw-r--r--  mm/vmscan.c            | 25
-rw-r--r--  mm/vmstat.c            |  2
4 files changed, 43 insertions, 21 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f4bfe824834f..bad9486ee0cc 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/wait.h>
+#include <linux/bitops.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/numa.h>
@@ -262,10 +263,7 @@ struct zone {
        unsigned long           nr_scan_active;
        unsigned long           nr_scan_inactive;
        unsigned long           pages_scanned;     /* since last reclaim */
-       int                     all_unreclaimable; /* All pages pinned */
-
-       /* A count of how many reclaimers are scanning this zone */
-       atomic_t                reclaim_in_progress;
+       unsigned long           flags;             /* zone flags, see below */
 
        /* Zone statistics */
        atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -343,6 +341,29 @@ struct zone {
        const char              *name;
 } ____cacheline_internodealigned_in_smp;
 
+typedef enum {
+       ZONE_ALL_UNRECLAIMABLE,         /* all pages pinned */
+       ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
+} zone_flags_t;
+
+static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
+{
+       set_bit(flag, &zone->flags);
+}
+static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
+{
+       clear_bit(flag, &zone->flags);
+}
+
+static inline int zone_is_all_unreclaimable(const struct zone *zone)
+{
+       return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
+}
+static inline int zone_is_reclaim_locked(const struct zone *zone)
+{
+       return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e3a05fd4734..fd2df29cc645 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -490,7 +490,7 @@ static void free_pages_bulk(struct zone *zone, int count,
                                        struct list_head *list, int order)
 {
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
+       zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
        while (count--) {
                struct page *page;
@@ -507,7 +507,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 static void free_one_page(struct zone *zone, struct page *page, int order)
 {
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
+       zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
        __free_one_page(page, zone, order);
        spin_unlock(&zone->lock);
@@ -1851,7 +1851,7 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_INACTIVE)),
                        K(zone->present_pages),
                        zone->pages_scanned,
-                       (zone->all_unreclaimable ? "yes" : "no")
+                       (zone_is_all_unreclaimable(zone) ? "yes" : "no")
                        );
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
@@ -3372,7 +3372,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                zone->nr_scan_active = 0;
                zone->nr_scan_inactive = 0;
                zap_zone_vm_stats(zone);
-               atomic_set(&zone->reclaim_in_progress, 0);
+               zone->flags = 0;
                if (!size)
                        continue;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bbd194630c5b..d8893dc2d4eb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1108,7 +1108,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
 
-       atomic_inc(&zone->reclaim_in_progress);
+       zone_set_flag(zone, ZONE_RECLAIM_LOCKED);
 
        /*
         * Add one to `nr_to_scan' just to make sure that the kernel will
@@ -1149,7 +1149,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 
        throttle_vm_writeout(sc->gfp_mask);
 
-       atomic_dec(&zone->reclaim_in_progress);
+       zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
        return nr_reclaimed;
 }
 
@@ -1187,7 +1187,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 
                note_zone_scanning_priority(zone, priority);
 
-               if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
                sc->all_unreclaimable = 0;
@@ -1368,7 +1368,8 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                           priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1403,7 +1404,8 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                           priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1424,12 +1426,13 @@ loop_again:
                                                        lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
-                       if (zone->all_unreclaimable)
+                       if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
                                (zone_page_state(zone, NR_ACTIVE)
                                + zone_page_state(zone, NR_INACTIVE)) * 6)
-                               zone->all_unreclaimable = 1;
+                               zone_set_flag(zone,
+                                             ZONE_ALL_UNRECLAIMABLE);
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -1595,7 +1598,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                if (!populated_zone(zone))
                        continue;
 
-               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                /* For pass = 0 we don't shrink the active list */
@@ -1919,10 +1922,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * not have reclaimable pages and if we should not delay the allocation
         * then do not scan.
         */
-       if (!(gfp_mask & __GFP_WAIT) ||
-               zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0 ||
-               (current->flags & PF_MEMALLOC))
+       if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) ||
+           zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC))
                return 0;
 
        /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3b5e9043e7db..4651bf153f35 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -704,7 +704,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n  all_unreclaimable: %u"
                   "\n  prev_priority:     %i"
                   "\n  start_pfn:         %lu",
-                  zone->all_unreclaimable,
+                  zone_is_all_unreclaimable(zone),
                   zone->prev_priority,
                   zone->zone_start_pfn);
        seq_putc(m, '\n');