diff options
author     Martin Hicks <mort@bork.org>            2005-09-03 18:54:51 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>  2005-09-05 03:05:44 -0400
commit     53e9a6159fdc6419874ce4d86d3577dbedc77b62 (patch)
tree       7c7f15325569cf5e02dafc1974fb080154616058 /mm
parent     bce5f6ba340b09d8b29902add204bb95a6d3d88b (diff)
[PATCH] VM: zone reclaim atomic ops cleanup
Christoph Lameter and Marcelo Tosatti asked to get rid of the
atomic_inc_and_test() to cleanup the atomic ops in the zone reclaim code.
Signed-off-by: Martin Hicks <mort@sgi.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 2 +-
-rw-r--r--  mm/vmscan.c     | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 620aa11b24eb..d157dae8c9f3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1909,7 +1909,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->nr_scan_inactive = 0;
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
-		atomic_set(&zone->reclaim_in_progress, -1);
+		atomic_set(&zone->reclaim_in_progress, 0);
 		if (!size)
 			continue;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ab631a3c62c3..0095533cdde9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 
+	atomic_inc(&zone->reclaim_in_progress);
+
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	}
 
 	throttle_vm_writeout();
+
+	atomic_dec(&zone->reclaim_in_progress);
 }
 
 /*
@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
-		atomic_inc(&zone->reclaim_in_progress);
 		shrink_zone(zone, sc);
-		atomic_dec(&zone->reclaim_in_progress);
 	}
 }
 
@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
 	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
 	/* Don't reclaim the zone if there are other reclaimers active */
-	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+	if (atomic_read(&zone->reclaim_in_progress) > 0)
 		goto out;
 
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
 out:
-	atomic_dec(&zone->reclaim_in_progress);
 	return total_reclaimed;
 }
 