author     Rik van Riel <riel@redhat.com>	2014-01-27 17:03:48 -0500
committer  Ingo Molnar <mingo@kernel.org>	2014-01-28 09:03:21 -0500
commit     be1e4e760d940c14d119bffef5eb007dfdf29046
tree       36991448598810e182371e785dd496ba21a07a38
parent     58b46da336a9312b2e21bb576d1c2c484dbf6257
sched/numa: Turn some magic numbers into #defines
Cleanup suggested by Mel Gorman. Now the code contains some more
hints on what statistics go where.
Suggested-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-10-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/sched/fair.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d5832c367d87..1f41b122198e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -896,6 +896,15 @@ struct numa_group {
 	unsigned long faults[0];
 };
 
+/* Shared or private faults. */
+#define NR_NUMA_HINT_FAULT_TYPES 2
+
+/* Memory and CPU locality */
+#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
+
+/* Averaged statistics, and temporary buffers. */
+#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
+
 pid_t task_numa_group_id(struct task_struct *p)
 {
 	return p->numa_group ? p->numa_group->gid : 0;
@@ -903,7 +912,7 @@ pid_t task_numa_group_id(struct task_struct *p)
 
 static inline int task_faults_idx(int nid, int priv)
 {
-	return 2 * nid + priv;
+	return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
 }
 
 static inline unsigned long task_faults(struct task_struct *p, int nid)
@@ -1509,7 +1518,7 @@ static void task_numa_placement(struct task_struct *p)
 		unsigned long faults = 0, group_faults = 0;
 		int priv, i;
 
-		for (priv = 0; priv < 2; priv++) {
+		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
 			long diff, f_diff, f_weight;
 
 			i = task_faults_idx(nid, priv);
@@ -1620,11 +1629,12 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 		INIT_LIST_HEAD(&grp->task_list);
 		grp->gid = p->pid;
 		/* Second half of the array tracks nids where faults happen */
-		grp->faults_cpu = grp->faults + 2 * nr_node_ids;
+		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
+						nr_node_ids;
 
 		node_set(task_node(current), grp->active_nodes);
 
-		for (i = 0; i < 4*nr_node_ids; i++)
+		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] = p->numa_faults_memory[i];
 
 		grp->total_faults = p->total_numa_faults;
@@ -1682,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 
 	double_lock(&my_grp->lock, &grp->lock);
 
-	for (i = 0; i < 4*nr_node_ids; i++) {
+	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
 		my_grp->faults[i] -= p->numa_faults_memory[i];
 		grp->faults[i] += p->numa_faults_memory[i];
 	}
@@ -1714,7 +1724,7 @@ void task_numa_free(struct task_struct *p)
 
 	if (grp) {
 		spin_lock(&grp->lock);
-		for (i = 0; i < 4*nr_node_ids; i++)
+		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;
 
@@ -1755,14 +1765,20 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 	/* Allocate buffer to track faults on a per-node basis */
 	if (unlikely(!p->numa_faults_memory)) {
-		int size = sizeof(*p->numa_faults_memory) * 4 * nr_node_ids;
+		int size = sizeof(*p->numa_faults_memory) *
+			   NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
 
-		/* numa_faults and numa_faults_buffer share the allocation */
-		p->numa_faults_memory = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
+		p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
 		if (!p->numa_faults_memory)
 			return;
 
 		BUG_ON(p->numa_faults_buffer_memory);
+		/*
+		 * The averaged statistics, shared & private, memory & cpu,
+		 * occupy the first half of the array. The second half of the
+		 * array is for current counters, which are averaged into the
+		 * first set by task_numa_placement.
+		 */
 		p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
 		p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
 		p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
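
For readers following the arithmetic, here is a small, self-contained user-space sketch (an illustration, not part of the patch) of what the three new #defines evaluate to and where the four fault regions set up at the end of task_numa_fault() start inside the single per-task allocation. The nr_node_ids value is made up for the demo.

#include <stdio.h>

/* Same definitions as the patch adds to kernel/sched/fair.c. */
#define NR_NUMA_HINT_FAULT_TYPES 2					/* shared, private */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)	/* x memory, cpu */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)	/* x averaged, buffer */

/* Same index math as task_faults_idx() in the patch. */
static int task_faults_idx(int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
}

int main(void)
{
	int nr_node_ids = 4;	/* hypothetical node count, demo only */

	/* One counter per bucket per node, all in one kzalloc'ed array. */
	printf("counters per task: %d\n",
	       NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids);		/* 32 */

	/* Start offsets of the four regions, as in task_numa_fault(). */
	printf("numa_faults_memory        at %d\n", 0);
	printf("numa_faults_cpu           at %d\n",
	       NR_NUMA_HINT_FAULT_TYPES * nr_node_ids);			/* 8 */
	printf("numa_faults_buffer_memory at %d\n",
	       NR_NUMA_HINT_FAULT_STATS * nr_node_ids);			/* 16 */
	printf("numa_faults_buffer_cpu    at %d\n",
	       (NR_NUMA_HINT_FAULT_STATS + NR_NUMA_HINT_FAULT_TYPES) *
	       nr_node_ids);						/* 24 */

	/* Private-fault slot for node 1 within any one region. */
	printf("task_faults_idx(1, 1) = %d\n", task_faults_idx(1, 1));
	return 0;
}

With four nodes this prints 32 counters and region starts of 0, 8, 16 and 24, matching the 2 *, 4 * and 6 * nr_node_ids offsets in the final hunk.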