-rw-r--r--  include/asm-i386/unistd.h |  2
-rw-r--r--  include/asm-ia64/unistd.h |  2
-rw-r--r--  include/linux/swap.h      |  1
-rw-r--r--  mm/vmscan.c               | 80
4 files changed, 2 insertions, 83 deletions
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 0f92e78dfea1..fe38b9a96233 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -256,7 +256,7 @@
 #define __NR_io_submit 248
 #define __NR_io_cancel 249
 #define __NR_fadvise64 250
-#define __NR_set_zone_reclaim 251
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
 #define __NR_exit_group 252
 #define __NR_lookup_dcookie 253
 #define __NR_epoll_create 254
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 6d96a67439be..2bf543493cb8 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -265,7 +265,7 @@
 #define __NR_keyctl 1273
 #define __NR_ioprio_set 1274
 #define __NR_ioprio_get 1275
-#define __NR_set_zone_reclaim 1276
+/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
 #define __NR_inotify_init 1277
 #define __NR_inotify_add_watch 1278
 #define __NR_inotify_rm_watch 1279
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 508668f840b6..bd6641784107 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,7 +172,6 @@ extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 795a050fe471..b2baca7645d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -74,9 +74,6 @@ struct scan_control {
 
 	int may_writepage;
 
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
-
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -430,8 +427,6 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 	 * Try to allocate it some swap space here.
 	 */
 	if (PageAnon(page) && !PageSwapCache(page)) {
-		if (!sc->may_swap)
-			goto keep_locked;
 		if (!add_to_swap(page))
 			goto activate_locked;
 	}
@@ -952,7 +947,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
 	sc.gfp_mask = gfp_mask;
 	sc.may_writepage = 0;
-	sc.may_swap = 1;
 
 	inc_page_state(allocstall);
 
@@ -1055,7 +1049,6 @@ loop_again:
 	total_reclaimed = 0;
 	sc.gfp_mask = GFP_KERNEL;
 	sc.may_writepage = 0;
-	sc.may_swap = 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
@@ -1353,76 +1346,3 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
-
-
-/*
- * Try to free up some pages from this zone through reclaim.
- */
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
-{
-	struct scan_control sc;
-	int nr_pages = 1 << order;
-	int total_reclaimed = 0;
-
-	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
-	if (!(gfp_mask & __GFP_WAIT))
-		return 0;
-	if (zone->all_unreclaimable)
-		return 0;
-
-	sc.gfp_mask = gfp_mask;
-	sc.may_writepage = 0;
-	sc.may_swap = 0;
-	sc.nr_mapped = read_page_state(nr_mapped);
-	sc.nr_scanned = 0;
-	sc.nr_reclaimed = 0;
-	/* scan at the highest priority */
-	sc.priority = 0;
-	disable_swap_token();
-
-	if (nr_pages > SWAP_CLUSTER_MAX)
-		sc.swap_cluster_max = nr_pages;
-	else
-		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
-
-	/* Don't reclaim the zone if there are other reclaimers active */
-	if (atomic_read(&zone->reclaim_in_progress) > 0)
-		goto out;
-
-	shrink_zone(zone, &sc);
-	total_reclaimed = sc.nr_reclaimed;
-
-out:
-	return total_reclaimed;
-}
-
-asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
-				     unsigned int state)
-{
-	struct zone *z;
-	int i;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-
-	if (node >= MAX_NUMNODES || !node_online(node))
-		return -EINVAL;
-
-	/* This will break if we ever add more zones */
-	if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
-		return -EINVAL;
-
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		if (!(zone & 1<<i))
-			continue;
-
-		z = &NODE_DATA(node)->node_zones[i];
-
-		if (state)
-			z->reclaim_pages = 1;
-		else
-			z->reclaim_pages = 0;
-	}
-
-	return 0;
-}