author		Izik Eidus <ieidus@redhat.com>	2009-09-23 18:56:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-24 10:20:56 -0400
commit		2c6854fdadf940678fd54779b778f6faafb870bb (patch)
tree		0ed3efb3651813593e38e7976d1201a738b300a6 /mm
parent		d2b5ec3aa0784335f031239e71fb50924cac9e0d (diff)
ksm: change default values to better fit into mainline kernel
Now that ksm is in mainline, it is better to change the default values so that they fit most users.

This patch changes the ksm defaults to:

    ksm_thread_pages_to_scan = 100 (instead of 200)
    ksm_thread_sleep_millisecs = 20 (as before)
    ksm_run = KSM_RUN_STOP (instead of KSM_RUN_MERGE, meaning ksm is disabled by default)
    ksm_max_kernel_pages = nr_free_buffer_pages / 4 (instead of 2046)

The important aspect of this patch is that it disables ksm by default and sets the number of kernel pages that can be allocated to a reasonable value.

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
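Since ksm_run now defaults to KSM_RUN_STOP, merging has to be switched on at runtime. A minimal userspace sketch, assuming the run and pages_to_scan attributes that this version of ksm exposes under /sys/kernel/mm/ksm:

/*
 * Userspace sketch (not part of the patch): with ksm_run defaulting to
 * KSM_RUN_STOP, an administrator has to enable merging explicitly
 * through the sysfs knobs under /sys/kernel/mm/ksm.
 */
#include <stdio.h>
#include <stdlib.h>

/* Write a single value to a KSM sysfs attribute. */
static int write_knob(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(value, f) == EOF) {
		perror(path);
		fclose(f);
		return -1;
	}
	if (fclose(f) != 0) {
		perror(path);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Turn merging back on (the pre-patch default behaviour). */
	if (write_knob("/sys/kernel/mm/ksm/run", "1"))
		return EXIT_FAILURE;

	/* Optionally restore the old scan batch size of 200 pages. */
	if (write_knob("/sys/kernel/mm/ksm/pages_to_scan", "200"))
		return EXIT_FAILURE;

	return EXIT_SUCCESS;
}

Writing 2 to run (KSM_RUN_UNMERGE) would instead break all existing merges, and writing 0 returns to the new default of doing nothing.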
Diffstat (limited to 'mm')
 mm/ksm.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 37cc37325094..f7edac356f46 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/mmu_notifier.h>
+#include <linux/swap.h>
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
@@ -162,10 +163,10 @@ static unsigned long ksm_pages_unshared;
 static unsigned long ksm_rmap_items;
 
 /* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages = 2000;
+static unsigned long ksm_max_kernel_pages;
 
 /* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 200;
+static unsigned int ksm_thread_pages_to_scan = 100;
 
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -173,7 +174,7 @@ static unsigned int ksm_thread_sleep_millisecs = 20;
 #define KSM_RUN_STOP	0
 #define KSM_RUN_MERGE	1
 #define KSM_RUN_UNMERGE	2
-static unsigned int ksm_run = KSM_RUN_MERGE;
+static unsigned int ksm_run = KSM_RUN_STOP;
 
 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
 static DEFINE_MUTEX(ksm_thread_mutex);
@@ -183,6 +184,11 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
+static void __init ksm_init_max_kernel_pages(void)
+{
+	ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
+}
+
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1667,6 +1673,8 @@ static int __init ksm_init(void)
 	struct task_struct *ksm_thread;
 	int err;
 
+	ksm_init_max_kernel_pages();
+
 	err = ksm_slab_init();
 	if (err)
 		goto out;
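The boot-time limit that ksm_init_max_kernel_pages() derives from nr_free_buffer_pages() / 4 can be read back from userspace; a rough sketch, assuming the max_kernel_pages attribute exported by this version of mm/ksm.c:

#include <stdio.h>

int main(void)
{
	unsigned long max_pages;
	FILE *f = fopen("/sys/kernel/mm/ksm/max_kernel_pages", "r");

	if (!f) {
		perror("max_kernel_pages");
		return 1;
	}
	if (fscanf(f, "%lu", &max_pages) != 1) {
		fprintf(stderr, "unexpected format\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Roughly nr_free_buffer_pages() / 4 at the time ksm_init() ran. */
	printf("KSM may pin at most %lu unswappable pages\n", max_pages);
	return 0;
}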