author     David S. Miller <davem@sunset.davemloft.net>  2006-03-18 02:40:47 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:16:38 -0500
commit     b52439c22c63dbbefd5395f2151c0ef4f667e949
tree       ff6671cab70dfaed00cf19367a6a71b1cda0cdf4
parent     05f9ca83596c7801549a2b4eba469d51baf5480f
[SPARC64]: Don't kill the page allocator when growing a TSB.
Try only lightly on > 1 order allocations.

If a grow fails, we are under memory pressure, so do not try to grow
the TSB for this address space any more.

If a > 0 order TSB allocation fails on a new fork, retry using a
0 order allocation.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/mm/tsb.c  |  43
1 files changed, 35 insertions, 8 deletions
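As a reading aid, here is a minimal, hypothetical sketch of the allocation
policy this patch introduces, written as a standalone kernel-style helper.
The function name tsb_alloc_pages_light() and the is_first_tsb parameter are
illustrative only and do not appear in the patch; the real logic is
open-coded inside tsb_grow() in the diff below.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper illustrating the patch's allocation policy:
 * attempt high-order (> 1 order) allocations "lightly" so a failure
 * neither warns nor hammers reclaim, and fall back to a 0-order
 * allocation when the very first TSB for a new address space cannot
 * be obtained (failing a fork is worse than a small TSB).
 */
static struct page *tsb_alloc_pages_light(unsigned long *size, int is_first_tsb)
{
	unsigned long order = get_order(*size);
	gfp_t gfp_flags = GFP_KERNEL;
	struct page *page;

	if (order > 1)
		/* Large request: don't warn or retry hard on failure. */
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	page = alloc_pages(gfp_flags, order);
	if (!page && is_first_tsb && order > 0) {
		/* New fork: back down to a single page rather than fail. */
		*size = PAGE_SIZE;
		page = alloc_pages(GFP_KERNEL, 0);
	}
	return page;
}

In the patch itself, tsb_grow() additionally pins mm->context.tsb_rss_limit
to ~0UL on either fallback path, so the TSB is not grown again for an
address space that has already hit memory pressure.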
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 7fbe1e0cd105..3eb8670282fd 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -216,7 +216,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
  * of two.  The TSB must be aligned to it's size, so f.e. a 512K TSB
- * must be 512K aligned.
+ * must be 512K aligned.  It also must be physically contiguous, so we
+ * cannot use vmalloc().
  *
  * The idea here is to grow the TSB when the RSS of the process approaches
  * the number of entries that the current TSB can hold at once.  Currently,
@@ -228,6 +229,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	unsigned long size, old_size, flags;
 	struct page *page;
 	struct tsb *old_tsb, *new_tsb;
+	unsigned long order, new_rss_limit;
+	gfp_t gfp_flags;
 
 	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
 		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
@@ -240,9 +243,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 			break;
 	}
 
-	page = alloc_pages(GFP_KERNEL, get_order(size));
-	if (unlikely(!page))
+	if (size == max_tsb_size)
+		new_rss_limit = ~0UL;
+	else
+		new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;
+
+retry_page_alloc:
+	order = get_order(size);
+	gfp_flags = GFP_KERNEL;
+	if (order > 1)
+		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
+
+	page = alloc_pages(gfp_flags, order);
+	if (unlikely(!page)) {
+		/* Not being able to fork due to a high-order TSB
+		 * allocation failure is very bad behavior.  Just back
+		 * down to a 0-order allocation and force no TSB
+		 * growing for this address space.
+		 */
+		if (mm->context.tsb == NULL && order > 0) {
+			size = PAGE_SIZE;
+			new_rss_limit = ~0UL;
+			goto retry_page_alloc;
+		}
+
+		/* If we failed on a TSB grow, we are under serious
+		 * memory pressure so don't try to grow any more.
+		 */
+		if (mm->context.tsb != NULL)
+			mm->context.tsb_rss_limit = ~0UL;
 		return;
+	}
 
 	/* Mark all tags as invalid. */
 	new_tsb = page_address(page);
@@ -286,11 +317,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 		return;
 	}
 
-	if (size == max_tsb_size)
-		mm->context.tsb_rss_limit = ~0UL;
-	else
-		mm->context.tsb_rss_limit =
-			((size / sizeof(struct tsb)) * 3) / 4;
+	mm->context.tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,