author     Mel Gorman <mgorman@suse.de>  2012-11-22 06:16:36 -0500
committer  Mel Gorman <mgorman@suse.de>  2012-12-11 09:42:55 -0500
commit     1a687c2e9a99335c9e77392f050fe607fa18a652 (patch)
tree       06df958bfdfeaf9f38f333af106b55faa81f1c6b /kernel/sched/core.c
parent     b8593bfda1652755136333cdd362de125b283a9c (diff)
mm: sched: numa: Control enabling and disabling of NUMA balancing
This patch adds Kconfig options and kernel parameters to allow automatic NUMA balancing to be enabled and disabled. The existence of such a switch was, and still is, very important when debugging problems related to transparent hugepages, and we should have the same for automatic NUMA placement.

Signed-off-by: Mel Gorman <mgorman@suse.de>
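The diff below is limited to kernel/sched/core.c; the Kconfig entries and the kernel-parameter parsing live in other files touched by this commit. As a rough sketch of how a boot parameter could be wired up to the new set_numabalancing_state() helper, something along these lines would work (the setup_numabalancing() name, the option strings and the parsing details are illustrative assumptions, not taken from this diff):

/*
 * Sketch only: hook a "numa_balancing=" boot option up to the helper added
 * by this patch. Assumes <linux/init.h>, <linux/string.h> and a declaration
 * of set_numabalancing_state() are in scope.
 */
static int __init setup_numabalancing(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "enable")) {
		set_numabalancing_state(true);	/* turn the NUMA scheduler feature on */
		return 1;
	}
	if (!strcmp(str, "disable")) {
		set_numabalancing_state(false);	/* turn it off (equivalent to NO_NUMA) */
		return 1;
	}

	return 0;	/* unrecognised value; leave the parameter unhandled */
}
__setup("numa_balancing=", setup_numabalancing);

With CONFIG_SCHED_DEBUG, the same switch can also be flipped at runtime by writing NUMA or NO_NUMA to the sched_features debugfs file, which goes through the sched_feat_write()/sched_feat_set() path refactored below.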
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  48
1 file changed, 33 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d255bc0e278..7a45015274ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -192,23 +192,10 @@ static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
 #endif /* HAVE_JUMP_LABEL */
 
-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
-		size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
 {
-	char buf[64];
-	char *cmp;
-	int neg = 0;
 	int i;
-
-	if (cnt > 63)
-		cnt = 63;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	int neg = 0;
 
 	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
@@ -228,6 +215,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 		}
 	}
 
+	return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	char *cmp;
+	int i;
+
+	if (cnt > 63)
+		cnt = 63;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+	cmp = strstrip(buf);
+
+	i = sched_feat_set(cmp);
 	if (i == __SCHED_FEAT_NR)
 		return -EINVAL;
 
@@ -1549,6 +1557,16 @@ static void __sched_fork(struct task_struct *p)
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+void set_numabalancing_state(bool enabled)
+{
+	if (enabled)
+		sched_feat_set("NUMA");
+	else
+		sched_feat_set("NO_NUMA");
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /*
  * fork()/clone()-time setup:
  */