author     Wolfgang Wander <wwc@rentec.com>           2005-06-21 20:14:49 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 21:46:16 -0400
commit     1363c3cd8603a913a27e2995dccbd70d5312d8e6 (patch)
tree       405e7fc1ef44678f3ca0a54c536d0457e6e80f45 /include/linux
parent     e7c8d5c9955a4d2e88e36b640563f5d6d5aba48a (diff)
[PATCH] Avoiding mmap fragmentation
Ingo recently introduced a great speedup for allocating new mmaps using the
free_area_cache pointer which boosts the specweb SSL benchmark by 4-5% and
causes huge performance increases in thread creation.

The downside of this patch is that it does lead to fragmentation in the
mmap-ed areas (visible via /proc/self/maps), such that some applications
that work fine under 2.4 kernels quickly run out of memory on any 2.6
kernel.

The problem is twofold:

1) the free_area_cache is used to continue a search for memory where the
   last search ended.  Before the change, new areas were always searched
   from the base address on.

   So now new small areas clutter holes of all sizes throughout the whole
   mmap-able region, whereas before small requests tended to close holes
   near the base, leaving holes far from the base large and available for
   larger requests.

2) the free_area_cache is also set to the location of the last munmap-ed
   area, so in scenarios where we allocate e.g. five regions of 1K each,
   then free regions 4, 2 and 3 in this order, the next request for 1K
   will be placed in the position of the old region 3, whereas before we
   appended it to the still active region 1, placing it at the location
   of the old region 2.  Before we had one free region of 2K; now we only
   get two free regions of 1K -> fragmentation.

The patch addresses these issues by introducing yet another cache
descriptor, cached_hole_size, that contains the largest known hole size
below the current free_area_cache.  If a new request comes in, its size
is compared against cached_hole_size, and if the request could be filled
with a hole below free_area_cache the search is started from the base
instead.

The results look promising: whereas 2.6.12-rc4 fragments quickly and my
(earlier posted) leakme.c test program terminates after 50000+ iterations
with 96 distinct and fragmented maps in /proc/self/maps, it performs
nicely (as expected) with thread creation: Ingo's test_str02 with 20000
threads requires 0.7s system time.

Taking out Ingo's patch (un-patch available per request) by basically
deleting all mentions of free_area_cache from the kernel and starting the
search for new memory always at the respective bases, we observe: leakme
terminates successfully with 11 distinct, hardly fragmented areas in
/proc/self/maps, but thread creation is grindingly slow: 30+s(!) system
time for Ingo's test_str02 with 20000 threads.

Now - drumroll ;-) - the appended patch works fine with leakme: it ends
with only 7 distinct areas in /proc/self/maps, and thread creation also
seems sufficiently fast at 0.71s for 20000 threads.

Signed-off-by: Wolfgang Wander <wwc@rentec.com>
Credit-to: "Richard Purdie" <rpurdie@rpsys.net>
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu> (partly)
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
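A minimal, self-contained userspace sketch of the search policy described
above may help.  Everything here (struct toy_mm, search_start(), BASE, the
example addresses) is invented for illustration; only the comparison of the
request length against cached_hole_size mirrors what the patch does in
arch_get_unmapped_area():

#include <stdio.h>

#define BASE 0x1000UL	/* stand-in for TASK_UNMAPPED_BASE */

struct toy_mm {
	unsigned long free_area_cache;	/* where the last search ended */
	unsigned long cached_hole_size;	/* largest known hole below it */
};

/* Decide where the search for a len-byte mapping should start. */
static unsigned long search_start(struct toy_mm *mm, unsigned long len)
{
	if (len > mm->cached_hole_size) {
		/* No hole below free_area_cache can fit this request:
		 * resume where the previous search ended (fast path). */
		return mm->free_area_cache;
	}
	/* Some hole below free_area_cache may fit: restart from the base
	 * so small requests fill low holes instead of spreading across
	 * the whole mmap-able region. */
	mm->cached_hole_size = 0;
	return BASE;
}

int main(void)
{
	struct toy_mm mm = { 0x8000UL, 0x2000UL };

	/* 0x3000 bytes exceed the 0x2000 cached hole: keep going forward. */
	printf("len 0x3000 -> start 0x%lx\n", search_start(&mm, 0x3000));
	/* 0x1000 bytes would fit the cached hole: restart from the base. */
	printf("len 0x1000 -> start 0x%lx\n", search_start(&mm, 0x1000));
	return 0;
}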
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/sched.h  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4dbb109022f3..b58afd97a180 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -201,8 +201,8 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
 			  unsigned long flags);
-extern void arch_unmap_area(struct vm_area_struct *area);
-extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+extern void arch_unmap_area(struct mm_struct *, unsigned long);
+extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
@@ -218,9 +218,10 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	void (*unmap_area) (struct vm_area_struct *area);
+	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 	unsigned long mmap_base;		/* base of mmap area */
-	unsigned long free_area_cache;		/* first hole */
+	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
+	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
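The changed arch_unmap_area() prototype above (an mm and the start address
of the freed range instead of a vm_area_struct) is what lets the unmap path
maintain the new cache fields; the actual implementation lives in the
mm/mmap.c part of this patch, outside this diffstat.  A sketch of that
bookkeeping, reusing struct toy_mm and BASE from the sketch above:

/* Freeing a region below the cached position opens a hole of unknown
 * size there: remember its address and mark the cached hole size
 * maximal, so the next search restarts from the base and rebuilds
 * the cache as it scans. */
static void toy_unmap_area(struct toy_mm *mm, unsigned long addr)
{
	if (addr >= BASE && addr < mm->free_area_cache) {
		mm->free_area_cache = addr;
		mm->cached_hole_size = ~0UL;
	}
}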