diff options
| author | Jeff Garzik <jgarzik@pobox.com> | 2005-09-08 05:43:49 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-09-08 05:43:49 -0400 |
| commit | 1d6ae775d7a948c9575658eb41184fd2e506c0df (patch) | |
| tree | 8128a28e89d82f13bb8e3a2160382240c66e2816 /mm/vmscan.c | |
| parent | 739cdbf1d8f0739b80035b80d69d871e33749b86 (diff) | |
| parent | caf39e87cc1182f7dae84eefc43ca14d54c78ef9 (diff) | |
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'mm/vmscan.c')
| -rw-r--r-- | mm/vmscan.c | 20 |
1 file changed, 12 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index cfffe5098d53..a740778f688d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc) | |||
| 822 | unsigned long nr_active; | 822 | unsigned long nr_active; |
| 823 | unsigned long nr_inactive; | 823 | unsigned long nr_inactive; |
| 824 | 824 | ||
| 825 | atomic_inc(&zone->reclaim_in_progress); | ||
| 826 | |||
| 825 | /* | 827 | /* |
| 826 | * Add one to `nr_to_scan' just to make sure that the kernel will | 828 | * Add one to `nr_to_scan' just to make sure that the kernel will |
| 827 | * slowly sift through the active list. | 829 | * slowly sift through the active list. |
| @@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc) | |||
| 861 | } | 863 | } |
| 862 | 864 | ||
| 863 | throttle_vm_writeout(); | 865 | throttle_vm_writeout(); |
| 866 | |||
| 867 | atomic_dec(&zone->reclaim_in_progress); | ||
| 864 | } | 868 | } |
| 865 | 869 | ||
| 866 | /* | 870 | /* |
| @@ -890,7 +894,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc) | |||
| 890 | if (zone->present_pages == 0) | 894 | if (zone->present_pages == 0) |
| 891 | continue; | 895 | continue; |
| 892 | 896 | ||
| 893 | if (!cpuset_zone_allowed(zone)) | 897 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) |
| 894 | continue; | 898 | continue; |
| 895 | 899 | ||
| 896 | zone->temp_priority = sc->priority; | 900 | zone->temp_priority = sc->priority; |
| @@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc) | |||
| 900 | if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) | 904 | if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) |
| 901 | continue; /* Let kswapd poll it */ | 905 | continue; /* Let kswapd poll it */ |
| 902 | 906 | ||
| 903 | atomic_inc(&zone->reclaim_in_progress); | ||
| 904 | shrink_zone(zone, sc); | 907 | shrink_zone(zone, sc); |
| 905 | atomic_dec(&zone->reclaim_in_progress); | ||
| 906 | } | 908 | } |
| 907 | } | 909 | } |
| 908 | 910 | ||
| @@ -938,7 +940,7 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask) | |||
| 938 | for (i = 0; zones[i] != NULL; i++) { | 940 | for (i = 0; zones[i] != NULL; i++) { |
| 939 | struct zone *zone = zones[i]; | 941 | struct zone *zone = zones[i]; |
| 940 | 942 | ||
| 941 | if (!cpuset_zone_allowed(zone)) | 943 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) |
| 942 | continue; | 944 | continue; |
| 943 | 945 | ||
| 944 | zone->temp_priority = DEF_PRIORITY; | 946 | zone->temp_priority = DEF_PRIORITY; |
| @@ -984,7 +986,7 @@ out: | |||
| 984 | for (i = 0; zones[i] != 0; i++) { | 986 | for (i = 0; zones[i] != 0; i++) { |
| 985 | struct zone *zone = zones[i]; | 987 | struct zone *zone = zones[i]; |
| 986 | 988 | ||
| 987 | if (!cpuset_zone_allowed(zone)) | 989 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) |
| 988 | continue; | 990 | continue; |
| 989 | 991 | ||
| 990 | zone->prev_priority = zone->temp_priority; | 992 | zone->prev_priority = zone->temp_priority; |
| @@ -1254,7 +1256,7 @@ void wakeup_kswapd(struct zone *zone, int order) | |||
| 1254 | return; | 1256 | return; |
| 1255 | if (pgdat->kswapd_max_order < order) | 1257 | if (pgdat->kswapd_max_order < order) |
| 1256 | pgdat->kswapd_max_order = order; | 1258 | pgdat->kswapd_max_order = order; |
| 1257 | if (!cpuset_zone_allowed(zone)) | 1259 | if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) |
| 1258 | return; | 1260 | return; |
| 1259 | if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) | 1261 | if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) |
| 1260 | return; | 1262 | return; |
| @@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order) | |||
| 1358 | sc.swap_cluster_max = SWAP_CLUSTER_MAX; | 1360 | sc.swap_cluster_max = SWAP_CLUSTER_MAX; |
| 1359 | 1361 | ||
| 1360 | /* Don't reclaim the zone if there are other reclaimers active */ | 1362 | /* Don't reclaim the zone if there are other reclaimers active */ |
| 1361 | if (!atomic_inc_and_test(&zone->reclaim_in_progress)) | 1363 | if (atomic_read(&zone->reclaim_in_progress) > 0) |
| 1362 | goto out; | 1364 | goto out; |
| 1363 | 1365 | ||
| 1364 | shrink_zone(zone, &sc); | 1366 | shrink_zone(zone, &sc); |
| 1365 | total_reclaimed = sc.nr_reclaimed; | 1367 | total_reclaimed = sc.nr_reclaimed; |
| 1366 | 1368 | ||
| 1367 | out: | 1369 | out: |
| 1368 | atomic_dec(&zone->reclaim_in_progress); | ||
| 1369 | return total_reclaimed; | 1370 | return total_reclaimed; |
| 1370 | } | 1371 | } |
| 1371 | 1372 | ||
| @@ -1375,6 +1376,9 @@ asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone, | |||
| 1375 | struct zone *z; | 1376 | struct zone *z; |
| 1376 | int i; | 1377 | int i; |
| 1377 | 1378 | ||
| 1379 | if (!capable(CAP_SYS_ADMIN)) | ||
| 1380 | return -EACCES; | ||
| 1381 | |||
| 1378 | if (node >= MAX_NUMNODES || !node_online(node)) | 1382 | if (node >= MAX_NUMNODES || !node_online(node)) |
| 1379 | return -EINVAL; | 1383 | return -EINVAL; |
| 1380 | 1384 | ||
