diff options
author | Rik van Riel <riel@redhat.com> | 2014-01-27 17:03:41 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-01-28 07:17:05 -0500 |
commit | ff1df896aef8e0ec1556a5c44f424bd45bfa2cbe (patch) | |
tree | 54bdc13838a9f9b2580d6afdc9fa6410164b798b | |
parent | 52bf84aa206cd2c2516dfa3e03b578edf8a3242f (diff) |
sched/numa: Rename p->numa_faults to numa_faults_memory
In order to get a more consistent naming scheme, making it clear
which fault statistics track memory locality, and which track
CPU locality, rename the memory fault statistics.
Suggested-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-3-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/sched.h | 8 | ||||
-rw-r--r-- | kernel/sched/core.c | 4 | ||||
-rw-r--r-- | kernel/sched/debug.c | 6 | ||||
-rw-r--r-- | kernel/sched/fair.c | 56 |
4 files changed, 37 insertions, 37 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index d572d5ba650f..144d509df053 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1469,15 +1469,15 @@ struct task_struct { | |||
1469 | * Scheduling placement decisions are made based on these counts. | 1469 | * Scheduling placement decisions are made based on these counts. |
1470 | * The values remain static for the duration of a PTE scan | 1470 | * The values remain static for the duration of a PTE scan |
1471 | */ | 1471 | */ |
1472 | unsigned long *numa_faults; | 1472 | unsigned long *numa_faults_memory; |
1473 | unsigned long total_numa_faults; | 1473 | unsigned long total_numa_faults; |
1474 | 1474 | ||
1475 | /* | 1475 | /* |
1476 | * numa_faults_buffer records faults per node during the current | 1476 | * numa_faults_buffer records faults per node during the current |
1477 | * scan window. When the scan completes, the counts in numa_faults | 1477 | * scan window. When the scan completes, the counts in |
1478 | * decay and these values are copied. | 1478 | * numa_faults_memory decay and these values are copied. |
1479 | */ | 1479 | */ |
1480 | unsigned long *numa_faults_buffer; | 1480 | unsigned long *numa_faults_buffer_memory; |
1481 | 1481 | ||
1482 | /* | 1482 | /* |
1483 | * numa_faults_locality tracks if faults recorded during the last | 1483 | * numa_faults_locality tracks if faults recorded during the last |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 81343d6bd9cb..bc708c53bf03 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1744,8 +1744,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) | |||
1744 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; | 1744 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
1745 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; | 1745 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
1746 | p->numa_work.next = &p->numa_work; | 1746 | p->numa_work.next = &p->numa_work; |
1747 | p->numa_faults = NULL; | 1747 | p->numa_faults_memory = NULL; |
1748 | p->numa_faults_buffer = NULL; | 1748 | p->numa_faults_buffer_memory = NULL; |
1749 | 1749 | ||
1750 | INIT_LIST_HEAD(&p->numa_entry); | 1750 | INIT_LIST_HEAD(&p->numa_entry); |
1751 | p->numa_group = NULL; | 1751 | p->numa_group = NULL; |
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index dd52e7ffb10e..31b908daaa1b 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
@@ -533,15 +533,15 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m) | |||
533 | unsigned long nr_faults = -1; | 533 | unsigned long nr_faults = -1; |
534 | int cpu_current, home_node; | 534 | int cpu_current, home_node; |
535 | 535 | ||
536 | if (p->numa_faults) | 536 | if (p->numa_faults_memory) |
537 | nr_faults = p->numa_faults[2*node + i]; | 537 | nr_faults = p->numa_faults_memory[2*node + i]; |
538 | 538 | ||
539 | cpu_current = !i ? (task_node(p) == node) : | 539 | cpu_current = !i ? (task_node(p) == node) : |
540 | (pol && node_isset(node, pol->v.nodes)); | 540 | (pol && node_isset(node, pol->v.nodes)); |
541 | 541 | ||
542 | home_node = (p->numa_preferred_nid == node); | 542 | home_node = (p->numa_preferred_nid == node); |
543 | 543 | ||
544 | SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n", | 544 | SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n", |
545 | i, node, cpu_current, home_node, nr_faults); | 545 | i, node, cpu_current, home_node, nr_faults); |
546 | } | 546 | } |
547 | } | 547 | } |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7cdde913b4dc..3e616d704f67 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -901,11 +901,11 @@ static inline int task_faults_idx(int nid, int priv) | |||
901 | 901 | ||
902 | static inline unsigned long task_faults(struct task_struct *p, int nid) | 902 | static inline unsigned long task_faults(struct task_struct *p, int nid) |
903 | { | 903 | { |
904 | if (!p->numa_faults) | 904 | if (!p->numa_faults_memory) |
905 | return 0; | 905 | return 0; |
906 | 906 | ||
907 | return p->numa_faults[task_faults_idx(nid, 0)] + | 907 | return p->numa_faults_memory[task_faults_idx(nid, 0)] + |
908 | p->numa_faults[task_faults_idx(nid, 1)]; | 908 | p->numa_faults_memory[task_faults_idx(nid, 1)]; |
909 | } | 909 | } |
910 | 910 | ||
911 | static inline unsigned long group_faults(struct task_struct *p, int nid) | 911 | static inline unsigned long group_faults(struct task_struct *p, int nid) |
@@ -927,7 +927,7 @@ static inline unsigned long task_weight(struct task_struct *p, int nid) | |||
927 | { | 927 | { |
928 | unsigned long total_faults; | 928 | unsigned long total_faults; |
929 | 929 | ||
930 | if (!p->numa_faults) | 930 | if (!p->numa_faults_memory) |
931 | return 0; | 931 | return 0; |
932 | 932 | ||
933 | total_faults = p->total_numa_faults; | 933 | total_faults = p->total_numa_faults; |
@@ -1255,7 +1255,7 @@ static int task_numa_migrate(struct task_struct *p) | |||
1255 | static void numa_migrate_preferred(struct task_struct *p) | 1255 | static void numa_migrate_preferred(struct task_struct *p) |
1256 | { | 1256 | { |
1257 | /* This task has no NUMA fault statistics yet */ | 1257 | /* This task has no NUMA fault statistics yet */ |
1258 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) | 1258 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory)) |
1259 | return; | 1259 | return; |
1260 | 1260 | ||
1261 | /* Periodically retry migrating the task to the preferred node */ | 1261 | /* Periodically retry migrating the task to the preferred node */ |
@@ -1371,16 +1371,16 @@ static void task_numa_placement(struct task_struct *p) | |||
1371 | long diff; | 1371 | long diff; |
1372 | 1372 | ||
1373 | i = task_faults_idx(nid, priv); | 1373 | i = task_faults_idx(nid, priv); |
1374 | diff = -p->numa_faults[i]; | 1374 | diff = -p->numa_faults_memory[i]; |
1375 | 1375 | ||
1376 | /* Decay existing window, copy faults since last scan */ | 1376 | /* Decay existing window, copy faults since last scan */ |
1377 | p->numa_faults[i] >>= 1; | 1377 | p->numa_faults_memory[i] >>= 1; |
1378 | p->numa_faults[i] += p->numa_faults_buffer[i]; | 1378 | p->numa_faults_memory[i] += p->numa_faults_buffer_memory[i]; |
1379 | fault_types[priv] += p->numa_faults_buffer[i]; | 1379 | fault_types[priv] += p->numa_faults_buffer_memory[i]; |
1380 | p->numa_faults_buffer[i] = 0; | 1380 | p->numa_faults_buffer_memory[i] = 0; |
1381 | 1381 | ||
1382 | faults += p->numa_faults[i]; | 1382 | faults += p->numa_faults_memory[i]; |
1383 | diff += p->numa_faults[i]; | 1383 | diff += p->numa_faults_memory[i]; |
1384 | p->total_numa_faults += diff; | 1384 | p->total_numa_faults += diff; |
1385 | if (p->numa_group) { | 1385 | if (p->numa_group) { |
1386 | /* safe because we can only change our own group */ | 1386 | /* safe because we can only change our own group */ |
@@ -1465,7 +1465,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
1465 | grp->gid = p->pid; | 1465 | grp->gid = p->pid; |
1466 | 1466 | ||
1467 | for (i = 0; i < 2*nr_node_ids; i++) | 1467 | for (i = 0; i < 2*nr_node_ids; i++) |
1468 | grp->faults[i] = p->numa_faults[i]; | 1468 | grp->faults[i] = p->numa_faults_memory[i]; |
1469 | 1469 | ||
1470 | grp->total_faults = p->total_numa_faults; | 1470 | grp->total_faults = p->total_numa_faults; |
1471 | 1471 | ||
@@ -1523,8 +1523,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
1523 | double_lock(&my_grp->lock, &grp->lock); | 1523 | double_lock(&my_grp->lock, &grp->lock); |
1524 | 1524 | ||
1525 | for (i = 0; i < 2*nr_node_ids; i++) { | 1525 | for (i = 0; i < 2*nr_node_ids; i++) { |
1526 | my_grp->faults[i] -= p->numa_faults[i]; | 1526 | my_grp->faults[i] -= p->numa_faults_memory[i]; |
1527 | grp->faults[i] += p->numa_faults[i]; | 1527 | grp->faults[i] += p->numa_faults_memory[i]; |
1528 | } | 1528 | } |
1529 | my_grp->total_faults -= p->total_numa_faults; | 1529 | my_grp->total_faults -= p->total_numa_faults; |
1530 | grp->total_faults += p->total_numa_faults; | 1530 | grp->total_faults += p->total_numa_faults; |
@@ -1550,12 +1550,12 @@ void task_numa_free(struct task_struct *p) | |||
1550 | { | 1550 | { |
1551 | struct numa_group *grp = p->numa_group; | 1551 | struct numa_group *grp = p->numa_group; |
1552 | int i; | 1552 | int i; |
1553 | void *numa_faults = p->numa_faults; | 1553 | void *numa_faults = p->numa_faults_memory; |
1554 | 1554 | ||
1555 | if (grp) { | 1555 | if (grp) { |
1556 | spin_lock(&grp->lock); | 1556 | spin_lock(&grp->lock); |
1557 | for (i = 0; i < 2*nr_node_ids; i++) | 1557 | for (i = 0; i < 2*nr_node_ids; i++) |
1558 | grp->faults[i] -= p->numa_faults[i]; | 1558 | grp->faults[i] -= p->numa_faults_memory[i]; |
1559 | grp->total_faults -= p->total_numa_faults; | 1559 | grp->total_faults -= p->total_numa_faults; |
1560 | 1560 | ||
1561 | list_del(&p->numa_entry); | 1561 | list_del(&p->numa_entry); |
@@ -1565,8 +1565,8 @@ void task_numa_free(struct task_struct *p) | |||
1565 | put_numa_group(grp); | 1565 | put_numa_group(grp); |
1566 | } | 1566 | } |
1567 | 1567 | ||
1568 | p->numa_faults = NULL; | 1568 | p->numa_faults_memory = NULL; |
1569 | p->numa_faults_buffer = NULL; | 1569 | p->numa_faults_buffer_memory = NULL; |
1570 | kfree(numa_faults); | 1570 | kfree(numa_faults); |
1571 | } | 1571 | } |
1572 | 1572 | ||
@@ -1591,16 +1591,16 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags) | |||
1591 | return; | 1591 | return; |
1592 | 1592 | ||
1593 | /* Allocate buffer to track faults on a per-node basis */ | 1593 | /* Allocate buffer to track faults on a per-node basis */ |
1594 | if (unlikely(!p->numa_faults)) { | 1594 | if (unlikely(!p->numa_faults_memory)) { |
1595 | int size = sizeof(*p->numa_faults) * 2 * nr_node_ids; | 1595 | int size = sizeof(*p->numa_faults_memory) * 2 * nr_node_ids; |
1596 | 1596 | ||
1597 | /* numa_faults and numa_faults_buffer share the allocation */ | 1597 | /* numa_faults_memory and numa_faults_buffer_memory share the allocation */ |
1598 | p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN); | 1598 | p->numa_faults_memory = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN); |
1599 | if (!p->numa_faults) | 1599 | if (!p->numa_faults_memory) |
1600 | return; | 1600 | return; |
1601 | 1601 | ||
1602 | BUG_ON(p->numa_faults_buffer); | 1602 | BUG_ON(p->numa_faults_buffer_memory); |
1603 | p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids); | 1603 | p->numa_faults_buffer_memory = p->numa_faults_memory + (2 * nr_node_ids); |
1604 | p->total_numa_faults = 0; | 1604 | p->total_numa_faults = 0; |
1605 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); | 1605 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); |
1606 | } | 1606 | } |
@@ -1629,7 +1629,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags) | |||
1629 | if (migrated) | 1629 | if (migrated) |
1630 | p->numa_pages_migrated += pages; | 1630 | p->numa_pages_migrated += pages; |
1631 | 1631 | ||
1632 | p->numa_faults_buffer[task_faults_idx(node, priv)] += pages; | 1632 | p->numa_faults_buffer_memory[task_faults_idx(node, priv)] += pages; |
1633 | p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages; | 1633 | p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages; |
1634 | } | 1634 | } |
1635 | 1635 | ||
@@ -4771,7 +4771,7 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env) | |||
4771 | { | 4771 | { |
4772 | int src_nid, dst_nid; | 4772 | int src_nid, dst_nid; |
4773 | 4773 | ||
4774 | if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults || | 4774 | if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory || |
4775 | !(env->sd->flags & SD_NUMA)) { | 4775 | !(env->sd->flags & SD_NUMA)) { |
4776 | return false; | 4776 | return false; |
4777 | } | 4777 | } |
@@ -4802,7 +4802,7 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env) | |||
4802 | if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER)) | 4802 | if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER)) |
4803 | return false; | 4803 | return false; |
4804 | 4804 | ||
4805 | if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) | 4805 | if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA)) |
4806 | return false; | 4806 | return false; |
4807 | 4807 | ||
4808 | src_nid = cpu_to_node(env->src_cpu); | 4808 | src_nid = cpu_to_node(env->src_cpu); |