Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c   33
-rw-r--r--  mm/vmscan.c       64
2 files changed, 92 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40169f0b7e9e..3c0f69ded6b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -724,6 +724,14 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
         return 1;
 }
 
+static inline int
+should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+{
+        if (!z->reclaim_pages)
+                return 0;
+        return 1;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
@@ -760,17 +768,32 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
 
         classzone_idx = zone_idx(zones[0]);
 
 restart:
         /* Go through the zonelist once, looking for a zone with enough free */
         for (i = 0; (z = zones[i]) != NULL; i++) {
-
-                if (!zone_watermark_ok(z, order, z->pages_low,
-                                       classzone_idx, 0, 0))
-                        continue;
+                int do_reclaim = should_reclaim_zone(z, gfp_mask);
 
                 if (!cpuset_zone_allowed(z))
                         continue;
 
+                /*
+                 * If the zone is to attempt early page reclaim then this loop
+                 * will try to reclaim pages and check the watermark a second
+                 * time before giving up and falling back to the next zone.
+                 */
+zone_reclaim_retry:
+                if (!zone_watermark_ok(z, order, z->pages_low,
+                                       classzone_idx, 0, 0)) {
+                        if (!do_reclaim)
+                                continue;
+                        else {
+                                zone_reclaim(z, gfp_mask, order);
+                                /* Only try reclaim once */
+                                do_reclaim = 0;
+                                goto zone_reclaim_retry;
+                        }
+                }
+
                 page = buffered_rmqueue(z, order, gfp_mask);
                 if (page)
                         goto got_pg;
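
To make the new control flow easier to follow in isolation, here is a minimal userspace sketch of the retry loop above, under simplified assumptions: the zone struct, zone_watermark_ok(), zone_reclaim() and alloc_from_zonelist() below are illustrative stubs standing in for the kernel code, not the real implementations. Each zone gets at most one reclaim attempt before the allocator falls back to the next zone in the zonelist:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures and helpers. */
struct zone {
        const char *name;
        int free_pages;
        int reclaim_pages;      /* the per-zone flag the new syscall toggles */
        int reclaimable;        /* pages a reclaim pass could recover */
};

static int zone_watermark_ok(struct zone *z, int mark)
{
        return z->free_pages >= mark;
}

static void zone_reclaim(struct zone *z)
{
        /* Pretend direct reclaim synchronously recovered some pages. */
        z->free_pages += z->reclaimable;
        z->reclaimable = 0;
}

static struct zone *alloc_from_zonelist(struct zone **zones, int mark)
{
        struct zone *z;
        int i;

        for (i = 0; (z = zones[i]) != NULL; i++) {
                /* One reclaim attempt is allowed iff the zone opted in. */
                int do_reclaim = z->reclaim_pages;

zone_reclaim_retry:
                if (!zone_watermark_ok(z, mark)) {
                        if (!do_reclaim)
                                continue;       /* fall back to next zone */
                        zone_reclaim(z);
                        do_reclaim = 0;         /* only try reclaim once */
                        goto zone_reclaim_retry;
                }
                z->free_pages -= mark;          /* "allocate" */
                return z;
        }
        return NULL;
}

int main(void)
{
        struct zone local  = { "local",  2, 1, 10 };    /* opted in, low on pages */
        struct zone remote = { "remote", 50, 0, 0 };
        struct zone *zones[] = { &local, &remote, NULL };
        struct zone *got = alloc_from_zonelist(zones, 8);

        printf("allocated from %s\n", got ? got->name : "nowhere");
        return 0;
}

The effect the sketch shows is presumably the point of the change: a nearly-full first zone that has opted in can satisfy the allocation after one local reclaim pass instead of spilling the allocation to the next (e.g. remote) zone.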
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6379ddbffd9b..7da846960d8a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1323,3 +1323,67 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
+
+
+/*
+ * Try to free up some pages from this zone through reclaim.
+ */
+int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+{
+        struct scan_control sc;
+        int nr_pages = 1 << order;
+        int total_reclaimed = 0;
+
+        /* The reclaim may sleep, so don't do it if sleep isn't allowed */
+        if (!(gfp_mask & __GFP_WAIT))
+                return 0;
+        if (zone->all_unreclaimable)
+                return 0;
+
+        sc.gfp_mask = gfp_mask;
+        sc.may_writepage = 0;
+        sc.may_swap = 0;
+        sc.nr_mapped = read_page_state(nr_mapped);
+        sc.nr_scanned = 0;
+        sc.nr_reclaimed = 0;
+        /* scan at the highest priority */
+        sc.priority = 0;
+
+        if (nr_pages > SWAP_CLUSTER_MAX)
+                sc.swap_cluster_max = nr_pages;
+        else
+                sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+
+        shrink_zone(zone, &sc);
+        total_reclaimed = sc.nr_reclaimed;
+
+        return total_reclaimed;
+}
+
+asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
+                                     unsigned int state)
+{
+        struct zone *z;
+        int i;
+
+        if (node >= MAX_NUMNODES || !node_online(node))
+                return -EINVAL;
+
+        /* This will break if we ever add more zones */
+        if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
+                return -EINVAL;
+
+        for (i = 0; i < MAX_NR_ZONES; i++) {
+                if (!(zone & 1<<i))
+                        continue;
+
+                z = &NODE_DATA(node)->node_zones[i];
+
+                if (state)
+                        z->reclaim_pages = 1;
+                else
+                        z->reclaim_pages = 0;
+        }
+
+        return 0;
+}
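
For completeness, a hypothetical userspace sketch of calling the new syscall. The hunks shown here do not assign a syscall number, so __NR_set_zone_reclaim below is a placeholder, and the ZONE_* values are assumptions matching the 1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM check above; the zone argument is a bitmask of zones, not a single index:

/* Hypothetical usage sketch; __NR_set_zone_reclaim is NOT assigned by
 * this patch and stands in for whatever number the syscall table uses. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Assumed zone numbering, matching the ZONE_* test in the patch. */
#define ZONE_DMA        0
#define ZONE_NORMAL     1
#define ZONE_HIGHMEM    2

int main(void)
{
        unsigned int node = 0;
        unsigned int zone_mask = 1u << ZONE_NORMAL;     /* bitmask, not an index */

#ifdef __NR_set_zone_reclaim
        /* state = 1 sets z->reclaim_pages on each selected zone */
        if (syscall(__NR_set_zone_reclaim, node, zone_mask, 1) != 0) {
                perror("set_zone_reclaim");
                return 1;
        }
        printf("early reclaim enabled on node %u\n", node);
#else
        (void)node;
        (void)zone_mask;
        fprintf(stderr, "set_zone_reclaim: no syscall number on this system\n");
#endif
        return 0;
}

Because the second argument is a mask, one call can toggle reclaim on several of a node's zones at once; a mask with no recognized zone bit set gets -EINVAL.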