about summary refs log tree commit diff stats
path: root/kernel/sched/fair.c
diff options
context:
space:
mode:
author	Rik van Riel <riel@redhat.com>	2014-01-27 17:03:46 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-28 09:03:17 -0500
commit	35664fd41e1c8cc4f0b89f6a51db5af39ba50640 (patch)
tree	bafac16f345f34d673d4d58f51a302137b4d7481	/kernel/sched/fair.c
parent	7e2703e6099609adc93679c4d45cd6247f565971 (diff)
sched/numa: Do statistics calculation using local variables only
The current code in task_numa_placement calculates the difference
between the old and the new value, but also temporarily stores half
of the old value in the per-process variables.

The NUMA balancing code looks at those per-process variables, and
having other tasks temporarily see halved statistics could lead to
unwanted numa migrations. This can be avoided by doing all the math
in local variables.

This change also simplifies the code a little.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-8-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8fc3a8234817..4c449907a10e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1513,12 +1513,9 @@ static void task_numa_placement(struct task_struct *p)
 			long diff, f_diff, f_weight;
 
 			i = task_faults_idx(nid, priv);
-			diff = -p->numa_faults_memory[i];
-			f_diff = -p->numa_faults_cpu[i];
 
 			/* Decay existing window, copy faults since last scan */
-			p->numa_faults_memory[i] >>= 1;
-			p->numa_faults_memory[i] += p->numa_faults_buffer_memory[i];
+			diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
 			fault_types[priv] += p->numa_faults_buffer_memory[i];
 			p->numa_faults_buffer_memory[i] = 0;
 
@@ -1532,13 +1529,12 @@ static void task_numa_placement(struct task_struct *p)
 			f_weight = div64_u64(runtime << 16, period + 1);
 			f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
 				   (total_faults + 1);
-			p->numa_faults_cpu[i] >>= 1;
-			p->numa_faults_cpu[i] += f_weight;
+			f_diff = f_weight - p->numa_faults_cpu[i] / 2;
 			p->numa_faults_buffer_cpu[i] = 0;
 
+			p->numa_faults_memory[i] += diff;
+			p->numa_faults_cpu[i] += f_diff;
 			faults += p->numa_faults_memory[i];
-			diff += p->numa_faults_memory[i];
-			f_diff += p->numa_faults_cpu[i];
 			p->total_numa_faults += diff;
 			if (p->numa_group) {
 				/* safe because we can only change our own group */