aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2012-11-22 06:16:36 -0500
committerMel Gorman <mgorman@suse.de>2012-12-11 09:42:55 -0500
commit1a687c2e9a99335c9e77392f050fe607fa18a652 (patch)
tree06df958bfdfeaf9f38f333af106b55faa81f1c6b /kernel
parentb8593bfda1652755136333cdd362de125b283a9c (diff)
mm: sched: numa: Control enabling and disabling of NUMA balancing
This patch adds Kconfig options and kernel parameters to allow the enabling and disabling of automatic NUMA balancing. The existence of such a switch was and is very important when debugging problems related to transparent hugepages, and we should have the same for automatic NUMA placement. Signed-off-by: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/core.c48
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/sched/features.h6
3 files changed, 40 insertions, 17 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d255bc0e278..7a45015274ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -192,23 +192,10 @@ static void sched_feat_disable(int i) { };
192static void sched_feat_enable(int i) { }; 192static void sched_feat_enable(int i) { };
193#endif /* HAVE_JUMP_LABEL */ 193#endif /* HAVE_JUMP_LABEL */
194 194
195static ssize_t 195static int sched_feat_set(char *cmp)
196sched_feat_write(struct file *filp, const char __user *ubuf,
197 size_t cnt, loff_t *ppos)
198{ 196{
199 char buf[64];
200 char *cmp;
201 int neg = 0;
202 int i; 197 int i;
203 198 int neg = 0;
204 if (cnt > 63)
205 cnt = 63;
206
207 if (copy_from_user(&buf, ubuf, cnt))
208 return -EFAULT;
209
210 buf[cnt] = 0;
211 cmp = strstrip(buf);
212 199
213 if (strncmp(cmp, "NO_", 3) == 0) { 200 if (strncmp(cmp, "NO_", 3) == 0) {
214 neg = 1; 201 neg = 1;
@@ -228,6 +215,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
228 } 215 }
229 } 216 }
230 217
218 return i;
219}
220
221static ssize_t
222sched_feat_write(struct file *filp, const char __user *ubuf,
223 size_t cnt, loff_t *ppos)
224{
225 char buf[64];
226 char *cmp;
227 int i;
228
229 if (cnt > 63)
230 cnt = 63;
231
232 if (copy_from_user(&buf, ubuf, cnt))
233 return -EFAULT;
234
235 buf[cnt] = 0;
236 cmp = strstrip(buf);
237
238 i = sched_feat_set(cmp);
231 if (i == __SCHED_FEAT_NR) 239 if (i == __SCHED_FEAT_NR)
232 return -EINVAL; 240 return -EINVAL;
233 241
@@ -1549,6 +1557,16 @@ static void __sched_fork(struct task_struct *p)
1549#endif /* CONFIG_NUMA_BALANCING */ 1557#endif /* CONFIG_NUMA_BALANCING */
1550} 1558}
1551 1559
1560#ifdef CONFIG_NUMA_BALANCING
1561void set_numabalancing_state(bool enabled)
1562{
1563 if (enabled)
1564 sched_feat_set("NUMA");
1565 else
1566 sched_feat_set("NO_NUMA");
1567}
1568#endif /* CONFIG_NUMA_BALANCING */
1569
1552/* 1570/*
1553 * fork()/clone()-time setup: 1571 * fork()/clone()-time setup:
1554 */ 1572 */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b577863933f..7a02a2082e95 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -811,6 +811,9 @@ void task_numa_fault(int node, int pages, bool migrated)
811{ 811{
812 struct task_struct *p = current; 812 struct task_struct *p = current;
813 813
814 if (!sched_feat_numa(NUMA))
815 return;
816
814 /* FIXME: Allocate task-specific structure for placement policy here */ 817 /* FIXME: Allocate task-specific structure for placement policy here */
815 818
816 /* 819 /*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 5fb7aefbec80..d2373a3e3252 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,8 +63,10 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
63SCHED_FEAT(LB_MIN, false) 63SCHED_FEAT(LB_MIN, false)
64 64
65/* 65/*
66 * Apply the automatic NUMA scheduling policy 66 * Apply the automatic NUMA scheduling policy. Enabled automatically
67 * at runtime if running on a NUMA machine. Can be controlled via
68 * numa_balancing=
67 */ 69 */
68#ifdef CONFIG_NUMA_BALANCING 70#ifdef CONFIG_NUMA_BALANCING
69SCHED_FEAT(NUMA, true) 71SCHED_FEAT(NUMA, false)
70#endif 72#endif