about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
author: Christoph Lameter <clameter@engr.sgi.com> 2006-03-22 03:08:22 -0500
committer: Linus Torvalds <torvalds@g5.osdl.org> 2006-03-22 10:54:00 -0500
commit7fb2d46d396b2491818f8e43b01049b3234e6c07 (patch)
treed5826a4d1c3b1853be20c6049311eb52f1a98360 /mm
parent1742f19fa920cdd6905f0db5898524dde22ab2a4 (diff)
[PATCH] zone_reclaim: additional comments and cleanup
Add some comments to explain how zone reclaim works. And it fixes the following issues: - PF_SWAPWRITE needs to be set for RECLAIM_SWAP to be able to write out pages to swap. Currently RECLAIM_SWAP may not do that. - remove setting nr_reclaimed pages after slab reclaim since the slab shrinking code does not use that and the nr_reclaimed pages is just right for the intended follow up action. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- mm/vmscan.c | 18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2d5d4864de88..c712b946e4ff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1887,6 +1887,7 @@ int zone_reclaim_interval __read_mostly = 30*HZ;
1887 */ 1887 */
1888static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 1888static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1889{ 1889{
1890 /* Minimum pages needed in order to stay on node */
1890 const unsigned long nr_pages = 1 << order; 1891 const unsigned long nr_pages = 1 << order;
1891 struct task_struct *p = current; 1892 struct task_struct *p = current;
1892 struct reclaim_state reclaim_state; 1893 struct reclaim_state reclaim_state;
@@ -1924,9 +1925,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1924 1925
1925 if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { 1926 if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
1926 /* 1927 /*
1927 * shrink_slab does not currently allow us to determine 1928 * shrink_slab() does not currently allow us to determine how
1928 * how many pages were freed in the zone. So we just 1929 * many pages were freed in this zone. So we just shake the slab
1929 * shake the slab and then go offnode for a single allocation. 1930 * a bit and then go off node for this particular allocation
1931 * despite possibly having freed enough memory to allocate in
1932 * this zone. If we freed local memory then the next
1933 * allocations will be local again.
1930 * 1934 *
1931 * shrink_slab will free memory on all zones and may take 1935 * shrink_slab will free memory on all zones and may take
1932 * a long time. 1936 * a long time.
@@ -1937,8 +1941,14 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1937 p->reclaim_state = NULL; 1941 p->reclaim_state = NULL;
1938 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 1942 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1939 1943
1940 if (nr_reclaimed == 0) 1944 if (nr_reclaimed == 0) {
1945 /*
1946 * We were unable to reclaim enough pages to stay on node. We
1947 * now allow off node accesses for a certain time period before
1948 * trying again to reclaim pages from the local zone.
1949 */
1941 zone->last_unsuccessful_zone_reclaim = jiffies; 1950 zone->last_unsuccessful_zone_reclaim = jiffies;
1951 }
1942 1952
1943 return nr_reclaimed >= nr_pages; 1953 return nr_reclaimed >= nr_pages;
1944} 1954}