author:    akpm@osdl.org <akpm@osdl.org>  2005-06-21 20:14:35 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 21:46:13 -0400
commit:    b15e0905f2b9964fc7426fecab57445e96021b61
tree:      bc1b3606cf282f88cd6598de22190eff6708affa  /mm/vmscan.c
parent:    39c715b71740c4a78ba4769fb54826929bac03cb
[PATCH] vmscan: notice slab shrinking
Fix a problem identified by Andrea Arcangeli <andrea@suse.de>:

kswapd will set a zone into all_unreclaimable state if it sees that we're not successfully reclaiming LRU pages.  But that fails to notice that we're successfully reclaiming slab objects, so we can set all_unreclaimable too soon.

So change shrink_slab() to return a success indication if it actually reclaimed some objects, and don't assume that the zone is all_unreclaimable if that is true.  This means that we won't enter all_unreclaimable state if we are successfully freeing slab objects but are not yet actually freeing slab pages, due to internal fragmentation.

(hm, this has a shortcoming.  We could be successfully freeing ZONE_NORMAL slab objects while being really oom on ZONE_DMA.  If that happens then kswapd might burn a lot of CPU.  But given that there might be some slab objects in ZONE_DMA, perhaps that is appropriate.)

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
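The effect of the patch, condensed from the hunks below: shrink_slab() now samples each shrinker's object count before a batch, compares it with the count afterwards, and accumulates the difference as its return value; kswapd then only declares a zone all_unreclaimable when that return value is zero.  A minimal sketch of the two halves (abridged from this diff, not a drop-in replacement for the full hunks):

        /*
         * In shrink_slab(): measure how many objects one batch actually freed.
         * A shrinker called with nr_to_scan == 0 just reports its object count.
         */
        nr_before = (*shrinker->shrinker)(0, gfp_mask);
        shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
        if (shrink_ret < nr_before)
                ret += nr_before - shrink_ret;          /* objects reclaimed */

        /*
         * In balance_pgdat(): only write the zone off if slab made no progress
         * either, rather than on the pages_scanned test alone.
         */
        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
        if (nr_slab == 0 && zone->pages_scanned >=
                        (zone->nr_active + zone->nr_inactive) * 4)
                zone->all_unreclaimable = 1;

Note that the count is in slab objects, not slab pages, so progress still registers while internal fragmentation keeps whole pages pinned, which is exactly the case the changelog describes.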
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  19
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 269eded9b459..c62cadce0426 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -180,17 +180,20 @@ EXPORT_SYMBOL(remove_shrinker);
  * `lru_pages' represents the number of on-LRU pages in all the zones which
  * are eligible for the caller's allocation attempt. It is used for balancing
  * slab reclaim versus page reclaim.
+ *
+ * Returns the number of slab objects which we shrunk.
  */
 static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
                         unsigned long lru_pages)
 {
         struct shrinker *shrinker;
+        int ret = 0;
 
         if (scanned == 0)
                 scanned = SWAP_CLUSTER_MAX;
 
         if (!down_read_trylock(&shrinker_rwsem))
-                return 0;
+                return 1;       /* Assume we'll be able to shrink next time */
 
         list_for_each_entry(shrinker, &shrinker_list, list) {
                 unsigned long long delta;
@@ -209,10 +212,14 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
                 while (total_scan >= SHRINK_BATCH) {
                         long this_scan = SHRINK_BATCH;
                         int shrink_ret;
+                        int nr_before;
 
+                        nr_before = (*shrinker->shrinker)(0, gfp_mask);
                         shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
                         if (shrink_ret == -1)
                                 break;
+                        if (shrink_ret < nr_before)
+                                ret += nr_before - shrink_ret;
                         mod_page_state(slabs_scanned, this_scan);
                         total_scan -= this_scan;
 
@@ -222,7 +229,7 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
                 shrinker->nr += total_scan;
         }
         up_read(&shrinker_rwsem);
-        return 0;
+        return ret;
 }
 
 /* Called without lock on whether page is mapped, so answer is unstable */
@@ -1079,6 +1086,7 @@ scan:
                  */
                 for (i = 0; i <= end_zone; i++) {
                         struct zone *zone = pgdat->node_zones + i;
+                        int nr_slab;
 
                         if (zone->present_pages == 0)
                                 continue;
@@ -1100,14 +1108,15 @@ scan:
                         sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
                         shrink_zone(zone, &sc);
                         reclaim_state->reclaimed_slab = 0;
-                        shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
+                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
+                                                lru_pages);
                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                         total_reclaimed += sc.nr_reclaimed;
                         total_scanned += sc.nr_scanned;
                         if (zone->all_unreclaimable)
                                 continue;
-                        if (zone->pages_scanned >= (zone->nr_active +
-                                        zone->nr_inactive) * 4)
+                        if (nr_slab == 0 && zone->pages_scanned >=
+                                    (zone->nr_active + zone->nr_inactive) * 4)
                                 zone->all_unreclaimable = 1;
                         /*
                          * If we've done a decent amount of scanning and